test_target_codegen_vulkan.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re

import numpy as np

import tvm
import tvm.testing
from tvm import te

@tvm.testing.requires_vulkan
def test_vector_comparison():
target = 'vulkan'
def check_correct_assembly(dtype):
n = (1024,)
A = te.placeholder(n, dtype=dtype, name='A')
B = te.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.tir.const(1, dtype),
tvm.tir.const(0, dtype)), name='B')
s = te.create_schedule(B.op)
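        # Split the axis into 128-thread blocks, then 4-wide chunks; the
        # innermost axis is vectorized so codegen must emit 4-lane SIMD ops.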
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
check_correct_assembly('float32')
check_correct_assembly('int32')
check_correct_assembly('float16')
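

# Thread axes shared by the remaining tests in this file.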
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_vulkan
def test_vulkan_copy():
def check_vulkan(dtype, n):
A = te.placeholder((n,), name='A', dtype=dtype)
ctx = tvm.vulkan(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(a_np)
b_np = a.asnumpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.asnumpy())
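    # Exercise host<->device round-trips across random dtypes and sizes
    # perturbed around powers of two.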
for _ in range(100):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
        perturb = np.random.uniform(low=0.5, high=1.5)
        check_vulkan(dtype, int(perturb * (2 ** logN)))
@tvm.testing.requires_vulkan
def test_vulkan_vectorize_add():
num_thread = 8
def check_vulkan(dtype, n, lanes):
A = te.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i]+tvm.tir.const(1, A.dtype), name='B')
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "vulkan")
ctx = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, ctx)
fun(a, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
check_vulkan("float32", 64, 2)
check_vulkan("float16", 64, 2)
@tvm.testing.requires_vulkan
def test_vulkan_stress():
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
import random
import threading
n = 1024
num_thread = 64
def run_stress():
def worker():
A = te.placeholder((n,), name='A', dtype="float32")
B = te.placeholder((n,), name='B', dtype="float32")
functions = [
(lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b),
(lambda: te.compute((n,), lambda i: A[i]+B[i]),
lambda a, b: a + b),
(lambda: te.compute((n,), lambda i: A[i]+2 * B[i]),
lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, bx)
s[C].bind(xi, tx)
fun = tvm.build(s, [A, B, C], "vulkan")
return (fun, ref)
fs = [build_f(random.choice(functions))
for _ in range(np.random.randint(low=1, high=10))]
ctx = tvm.vulkan(0)
a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, ctx).copyfrom(
np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, ctx) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(
c.asnumpy(), ref(a.asnumpy(), b.asnumpy()))
ts = [threading.Thread(target=worker)
for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
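

# The tests above are also collected by pytest; running this file directly
# simply executes them in sequence (a Vulkan-capable device is required).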
if __name__ == "__main__":
test_vector_comparison()
test_vulkan_copy()
test_vulkan_vectorize_add()
test_vulkan_stress()
main_window.py
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, InvoiceError)
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum.lnutil import PaymentFailure, SENT, RECEIVED
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from electrum.util import pr_expiration_values
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
if TYPE_CHECKING:
from . import ElectrumGui
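

# Maximum number of attempts made for a single Lightning payment.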
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.send_tab_is_onchain = False
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.config.get('lightning'):
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus()
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels', 'payment_received',
'payment_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels':
self.channels_list.update_rows.emit()
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'payment_status':
self.on_payment_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
elif event == 'payment_received':
wallet, key, status = args
if wallet == self.wallet:
self.notify(_('Payment received') + '\n' + key)
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
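        # OpenAlias resolution hits the network, so run it on a background
        # daemon thread and signal the GUI via alias_received_signal when done.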
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.need_update.set()
        # Once the GUI is initialized, check whether anything needs to be announced,
        # since the callback may have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
    def update_recently_visited(self, filename):
        recent = self.config.get('recently_open', [])
        try:
            sorted(recent)  # sanity check: the stored value must be a sortable list
        except Exception:
            recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.config.get('lightning'):
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings" and "Preferences" are reserved menu names on macOS; use a different label as a workaround.
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
if self.config.get('lightning'):
tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
    def getOpenFileName(self, title, filter=""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
    def getSaveFileName(self, title, filename, filter=""):
        directory = self.config.get('io_dir', os.path.expanduser('~'))
        path = os.path.join(directory, filename)
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
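        # Keep the BTC and fiat amount fields in sync. The 'follows' flag on
        # each edit guards against feedback loops while one field
        # programmatically updates the other.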
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                c, u, x = self.wallet.get_balance()
                text = _("Balance") + ": %s " % (self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]" % (self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]" % (self.format_amount(x, is_diff=True).strip())
                if self.wallet.lnworker:
                    ln_balance = self.wallet.lnworker.get_balance()
                    text += u' \U0001f5f2 %s' % (self.format_amount_and_units(ln_balance).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
        self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
            _('The Bitcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.config.get('lightning'):
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox_r = QHBoxLayout()
hbox_r.addWidget(self.receive_qr)
hbox_r.addWidget(self.receive_address_e)
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addLayout(hbox_r)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
self.show_message(_("{} copied to clipboard").format(title))
#QToolTip.showText(QCursor.pos(), _("{} copied to clipboard").format(title), self.parent)
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 3, 4)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
            if self.max_button.isChecked():
                self.spend_max()
            else:
                self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
self.feecontrol_fields = QWidget()
vbox_feecontrol = QVBoxLayout(self.feecontrol_fields)
vbox_feecontrol.setContentsMargins(0, 0, 0, 0)
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_button)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
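# Fall back to one of the wallet's own addresses so fee estimation can run before a recipient is entered.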
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.is_onchain:
return
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs = self.read_outputs()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
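# make_tx wraps transaction construction so it can be retried below with a different fee estimate.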
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + '\t' + "%s"%x.get('address') + '\t'
for coin in self.pay_from:
item = QTreeWidgetItem([format(coin), self.format_amount(coin['value'])])
item.setFont(0, QFont(MONOSPACE_FONT))
self.from_list.addTopLevelItem(item)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
@protected
def protect(self, func, args, password):
return func(*args, password)
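# A fee/feerate field is "frozen" when the user edited it manually; frozen values are preserved across fee recomputation.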
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
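# Returns an absolute fee in satoshis, a feerate-based estimator callable, or None (use the slider's dynamic estimate).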
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_outputs(self):
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice):
amount_sat = self.amount_e.get_amount()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_payment_status(self, key, status, *args):
# todo: check that key is in this wallet's invoice list
self.invoice_list.update()
if status == 'success':
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == 'progress':
self.logger.info(f'on_payment_status {key} {status} {args}')
elif status == 'failure':
self.show_error(_('Payment failed'))
elif status == 'error':
e = args[0]
self.show_error(_('Error') + '\n' + str(e))
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self.is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
return self.wallet.lnworker.parse_bech32_invoice(invoice)
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_preview(self):
self.do_pay(preview=True)
def do_pay(self, preview=False):
invoice = self.read_invoice()
if not invoice:
return
if not preview:
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
self.do_pay_invoice(invoice, preview)
def do_pay_invoice(self, invoice, preview=False):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs']
else:
raise Exception('unknown invoice type')
if run_hook('abort_send', self):
return
outputs = [TxOutput(*x) for x in outputs]
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs))
fee = tx.get_fee()
use_rbf = bool(self.config.get('use_rbf', True))
if use_rbf:
tx.set_rbf(True)
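# relayfee() is denominated in sat per 1000 bytes, hence the division by 1000 to get the minimum absolute fee.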
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, message)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, message)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
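# Plugins (e.g. the TrustedCoin two-factor plugin) may wrap the callbacks via the 'tc_sign_wrapper' hook.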
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
key = pr.get_id()
#self.wallet.set_invoice_paid(key, tx.txid())
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
@protected
def open_channel(self, *args, **kwargs):
def task():
return self.wallet.lnworker.open_channel(*args, **kwargs)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
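# Scan the tagged fields for 'd' (description); the for/else falls through to an empty description when the tag is absent.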
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self.is_onchain = b
self.preview_button.setEnabled(b)
self.max_button.setEnabled(b)
self.show_send_tab_onchain_fees(b)
def show_send_tab_onchain_fees(self, b: bool):
self.feecontrol_fields.setVisible(b)
self.fee_e_label.setVisible(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.set_onchain(len(coins) > 0)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error(_('Cannot find payment request in wallet.'))
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
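# Bind 'method' through mkfunc's arguments to avoid the late-binding pitfall of closing over the loop variable.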
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt) -> Optional[Transaction]:
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
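# Runs on a worker thread; progress and completion are reported back to the GUI thread via Qt signals.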
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
writer = csv.writer(f)
writer.writerow(["address", "private_key"])
for addr, pk in pklist.items():
writer.writerow(["%34s" % addr, pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
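# child fee = target total feerate * combined size - parent fee, capped at the child's input amount and floored at total_size sat (~1 sat/byte).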
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
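# Suggest the larger of +50% and +1 sat/vbyte over the old rate as the starting point for the bump.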
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
NewSentenceTransformer.py
import json
import logging
import os
import shutil
import stat
import warnings
from collections import OrderedDict
from functools import partial
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
import requests
import numpy as np
from numpy import ndarray
import transformers
from huggingface_hub import HfApi, HfFolder, Repository, hf_hub_url, cached_download
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import trange
import math
import queue
import torch.distributed as dist
import tempfile
from distutils.dir_util import copy_tree
from accelerate import Accelerator
from torch.utils.tensorboard import SummaryWriter
from . import __MODEL_HUB_ORGANIZATION__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, fullname, snapshot_download, mismatched_sizes_all_gather
from .models import Transformer, Pooling, Dense
from .model_card_templates import ModelCardTemplate
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
Loads or creates a SentenceTransformer model that can be used to map sentences / texts to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
:param cache_folder: Path to store models
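Example (a minimal usage sketch; the model name is illustrative):
    model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
    embeddings = model.encode(['Hello world', 'How are you?'])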
"""
def __init__(self, model_name_or_path: Optional[str] = None, modules: Optional[Iterable[nn.Module]] = None, device: Optional[str] = None, cache_folder: Optional[str] = None, **auto_model_kwargs):
self._model_card_vars = {}
self._model_card_text = None
self._model_config = {}
if cache_folder is None:
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
#Old models that don't belong to any organization
basic_transformer_models = ['albert-base-v1', 'albert-base-v2', 'albert-large-v1', 'albert-large-v2', 'albert-xlarge-v1', 'albert-xlarge-v2', 'albert-xxlarge-v1', 'albert-xxlarge-v2', 'bert-base-cased-finetuned-mrpc', 'bert-base-cased', 'bert-base-chinese', 'bert-base-german-cased', 'bert-base-german-dbmdz-cased', 'bert-base-german-dbmdz-uncased', 'bert-base-multilingual-cased', 'bert-base-multilingual-uncased', 'bert-base-uncased', 'bert-large-cased-whole-word-masking-finetuned-squad', 'bert-large-cased-whole-word-masking', 'bert-large-cased', 'bert-large-uncased-whole-word-masking-finetuned-squad', 'bert-large-uncased-whole-word-masking', 'bert-large-uncased', 'camembert-base', 'ctrl', 'distilbert-base-cased-distilled-squad', 'distilbert-base-cased', 'distilbert-base-german-cased', 'distilbert-base-multilingual-cased', 'distilbert-base-uncased-distilled-squad', 'distilbert-base-uncased-finetuned-sst-2-english', 'distilbert-base-uncased', 'distilgpt2', 'distilroberta-base', 'gpt2-large', 'gpt2-medium', 'gpt2-xl', 'gpt2', 'openai-gpt', 'roberta-base-openai-detector', 'roberta-base', 'roberta-large-mnli', 'roberta-large-openai-detector', 'roberta-large', 't5-11b', 't5-3b', 't5-base', 't5-large', 't5-small', 'transfo-xl-wt103', 'xlm-clm-ende-1024', 'xlm-clm-enfr-1024', 'xlm-mlm-100-1280', 'xlm-mlm-17-1280', 'xlm-mlm-en-2048', 'xlm-mlm-ende-1024', 'xlm-mlm-enfr-1024', 'xlm-mlm-enro-1024', 'xlm-mlm-tlm-xnli15-1024', 'xlm-mlm-xnli15-1024', 'xlm-roberta-base', 'xlm-roberta-large-finetuned-conll02-dutch', 'xlm-roberta-large-finetuned-conll02-spanish', 'xlm-roberta-large-finetuned-conll03-english', 'xlm-roberta-large-finetuned-conll03-german', 'xlm-roberta-large', 'xlnet-base-cased', 'xlnet-large-cased']
if os.path.exists(model_name_or_path):
#Load from path
model_path = model_name_or_path
else:
#Not a path, load from hub
if '\\' in model_name_or_path or model_name_or_path.count('/') > 1:
raise ValueError("Path {} not found".format(model_name_or_path))
if '/' not in model_name_or_path and model_name_or_path.lower() not in basic_transformer_models:
# A model from sentence-transformers
model_name_or_path = __MODEL_HUB_ORGANIZATION__ + "/" + model_name_or_path
model_path = os.path.join(cache_folder, model_name_or_path.replace("/", "_"))
# Download from hub with caching
snapshot_download(model_name_or_path,
cache_dir=model_path,
library_name='sentence-transformers',
library_version=__version__,
ignore_files=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'])
if os.path.exists(os.path.join(model_path, 'modules.json')): #Load as SentenceTransformer model
modules = self._load_sbert_model(model_path)
else: #Load with AutoModel
modules = self._load_auto_model(model_path, **auto_model_kwargs)
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
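    # Usage sketch (illustrative, not part of the module): loading a model by
    # hub name or local path; the model name below is a hypothetical example.
    #
    #   model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
    #   print(model.get_sentence_embedding_dimension())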
def encode(self, sentences: Union[str, List[str]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
device: str = None,
normalize_embeddings: bool = False,
num_proc=None,
accelerator: Accelerator=None) -> Union[List[Tensor], ndarray, Tensor]:
        # sentences are loaded onto the device; the model has also been loaded onto the accelerator's device.
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar when encoding sentences
        :param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings. Set to None, to get all output values
        :param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
        :param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
        :param device: Which torch.device to use for the computation
        :param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
        :param num_proc: How many processes to distribute the computation through. With `device=None`, will distribute the computation across all available GPUs.
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
if convert_to_tensor:
convert_to_numpy = False
if output_value != 'sentence_embedding':
convert_to_tensor = False
convert_to_numpy = False
input_was_string = False
if isinstance(sentences, str) or not hasattr(sentences, '__len__'): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
#length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
#sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
# For distributed training with accelerator
        if accelerator is not None:
            for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
                end_idx = min(len(sentences), start_index + batch_size)
                sentences_batch = sentences[start_index:end_idx]
                batch_embeddings = self._encode(sentences_batch, device=accelerator.device, output_value=output_value,
                                                convert_to_numpy=convert_to_numpy, normalize_embeddings=normalize_embeddings,
                                                multiprocessing=True, accelerator=accelerator)
                if start_index == 0:
                    all_embeddings = batch_embeddings
                else:
                    all_embeddings = torch.cat((all_embeddings, batch_embeddings), 0)
            all_embeddings = [all_embeddings[idx] for idx in range(len(all_embeddings))]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
#all_embeddings=list(all_embeddings)
#local_embeddings = torch.stack(local_embeddings)
# gathering everything thanks to the size information from earlier
#all_embeddings = mismatched_sizes_all_gather(local_embeddings)
#all_embeddings = torch.cat(all_embeddings)
    # For distributed training without accelerator: no such path is implemented
    # in this variant; encode() expects an accelerator to be passed.
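    # Usage sketch (illustrative, not part of the module): the accelerator-driven
    # encode path above. `model` is a SentenceTransformer instance; the sentence
    # list and batch size are hypothetical example values.
    #
    #   from accelerate import Accelerator
    #   accelerator = Accelerator()
    #   embeddings = model.encode(["a sentence", "another sentence"],
    #                             batch_size=32, convert_to_numpy=True,
    #                             accelerator=accelerator)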
def _encode(self, sentences_batch, device, output_value: str = 'sentence_embedding', convert_to_numpy: bool = False,
normalize_embeddings: bool = False, multiprocessing=False,accelerator: Accelerator=None):
if accelerator is not None:
with torch.no_grad():
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
out_features = self.forward(features)
if output_value == 'token_embeddings':
embeddings = []
for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
last_mask_id = len(attention) - 1
while last_mask_id > 0 and attention[last_mask_id].item() == 0:
last_mask_id -= 1
embeddings.append(token_emb[0:last_mask_id + 1])
elif output_value is None: # Return all outputs
embeddings = []
for sent_idx in range(len(out_features['sentence_embedding'])):
row = {name: out_features[name][sent_idx] for name in out_features}
embeddings.append(row)
else: # Sentence embeddings
embeddings = out_features[output_value]
embeddings = embeddings.detach()
if normalize_embeddings:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
return embeddings
if multiprocessing:
rank = mp.current_process()._identity[0]
if device is None and torch.cuda.is_available():
device = f"cuda:{rank % torch.cuda.device_count()}"
self.to(device)
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
if output_value == 'token_embeddings':
embeddings = []
for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
last_mask_id = len(attention) - 1
while last_mask_id > 0 and attention[last_mask_id].item() == 0:
last_mask_id -= 1
embeddings.append(token_emb[0:last_mask_id + 1])
elif output_value is None: # Return all outputs
embeddings = []
for sent_idx in range(len(out_features['sentence_embedding'])):
row = {name: out_features[name][sent_idx] for name in out_features}
embeddings.append(row)
else: # Sentence embeddings
embeddings = out_features[output_value]
embeddings = embeddings.detach()
if normalize_embeddings:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
return embeddings
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
        Starts a multi-process pool to run the encoding with several independent processes.
        This method is recommended if you want to encode on multiple GPUs. It is advised
        to start only one process per GPU. This method works together with encode_multi_process
        :param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
        This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
        and sent to individual processes, which encode them on the different GPUs. This method is only suitable
        for encoding large sets of sentences
        :param sentences: List of sentences
        :param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
        :param batch_size: Encode sentences with batch size
        :param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
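    # Usage sketch (illustrative, not part of the module): the multi-process
    # encode workflow. Device names and `sentences` are hypothetical examples.
    #
    #   pool = model.start_multi_process_pool(['cuda:0', 'cuda:1'])
    #   embeddings = model.encode_multi_process(sentences, pool, batch_size=32)
    #   model.stop_multi_process_pool(pool)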
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
                chunk_id, batch_size, sentences = input_queue.get()
                embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
                results_queue.put([chunk_id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]]):
"""
Tokenizes the texts
"""
return self._first_module().tokenize(texts)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path: str, model_name: Optional[str] = None, create_model_card: bool = True):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
:param path: Path on disc
:param model_name: Optional model name
:param create_model_card: If True, create a README.md with basic information about this model
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
modules_config = []
#Save some model info
if '__version__' not in self._model_config:
self._model_config['__version__'] = {
'sentence_transformers': __version__,
'transformers': transformers.__version__,
'pytorch': torch.__version__,
}
with open(os.path.join(path, 'config_sentence_transformers.json'), 'w') as fOut:
json.dump(self._model_config, fOut, indent=2)
#Save modules
for idx, name in enumerate(self._modules):
module = self._modules[name]
if idx == 0 and isinstance(module, Transformer): #Save transformer model in the main folder
model_path = path + "/"
else:
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
modules_config.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(modules_config, fOut, indent=2)
# Create model card
if create_model_card:
self._create_model_card(path, model_name)
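    # Usage sketch (illustrative, not part of the module): per the loop above,
    # save() writes config_sentence_transformers.json, modules.json, an optional
    # README.md model card, the first Transformer module in the root folder and
    # the remaining modules in numbered sub-folders (e.g. 1_Pooling). The path
    # below is a hypothetical example.
    #
    #   model.save('/tmp/my-sbert-model', model_name='my-sbert-model')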
def _create_model_card(self, path: str, model_name: Optional[str] = None):
"""
        Creates an automatic model card and stores it in the given path
"""
if self._model_card_text is not None and len(self._model_card_text) > 0:
model_card = self._model_card_text
else:
tags = ModelCardTemplate.__TAGS__.copy()
model_card = ModelCardTemplate.__MODEL_CARD__
if len(self._modules) == 2 and isinstance(self._first_module(), Transformer) and isinstance(self._last_module(), Pooling) and self._last_module().get_pooling_mode_str() in ['cls', 'max', 'mean']:
pooling_module = self._last_module()
pooling_mode = pooling_module.get_pooling_mode_str()
model_card = model_card.replace("{USAGE_TRANSFORMERS_SECTION}", ModelCardTemplate.__USAGE_TRANSFORMERS__)
pooling_fct_name, pooling_fct = ModelCardTemplate.model_card_get_pooling_function(pooling_mode)
model_card = model_card.replace("{POOLING_FUNCTION}", pooling_fct).replace("{POOLING_FUNCTION_NAME}", pooling_fct_name).replace("{POOLING_MODE}", pooling_mode)
tags.append('transformers')
# Print full model
model_card = model_card.replace("{FULL_MODEL_STR}", str(self))
# Add tags
model_card = model_card.replace("{TAGS}", "\n".join(["- "+t for t in tags]))
# Add dim info
self._model_card_vars["{NUM_DIMENSIONS}"] = self.get_sentence_embedding_dimension()
# Replace vars we created while using the model
for name, value in self._model_card_vars.items():
model_card = model_card.replace(name, str(value))
# Replace remaining vars with default values
for name, value in ModelCardTemplate.__DEFAULT_VARS__.items():
model_card = model_card.replace(name, str(value))
if model_name is not None:
model_card = model_card.replace("{MODEL_NAME}", model_name.strip())
with open(os.path.join(path, "README.md"), "w", encoding='utf8') as fOut:
fOut.write(model_card.strip())
def save_to_hub(self,
repo_name: str,
organization: Optional[str] = None,
private: Optional[bool] = None,
commit_message: str = "Add new SentenceTransformer model.",
local_model_path: Optional[str] = None,
exist_ok: bool = False,
replace_model_card: bool = False):
"""
Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.
:param repo_name: Repository name for your model in the Hub.
:param organization: Organization in which you want to push your model or tokenizer (you must be a member of this organization).
        :param private: Set to true for hosting a private model
:param commit_message: Message to commit while pushing.
:param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded
:param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible
:param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card
:return: The url of the commit of your model in the given repository.
"""
token = HfFolder.get_token()
if token is None:
raise ValueError("You must login to the Hugging Face hub on this computer by typing `transformers-cli login`.")
if '/' in repo_name:
splits = repo_name.split('/', maxsplit=1)
if organization is None or organization == splits[0]:
organization = splits[0]
repo_name = splits[1]
else:
raise ValueError("You passed and invalid repository name: {}.".format(repo_name))
endpoint = "https://huggingface.co"
repo_url = HfApi(endpoint=endpoint).create_repo(
token,
repo_name,
organization=organization,
private=private,
repo_type=None,
exist_ok=exist_ok,
)
full_model_name = repo_url[len(endpoint)+1:].strip("/")
with tempfile.TemporaryDirectory() as tmp_dir:
# First create the repo (and clone its content if it's nonempty).
logging.info("Create repository and clone it if it exists")
repo = Repository(tmp_dir, clone_from=repo_url)
# If user provides local files, copy them.
if local_model_path:
copy_tree(local_model_path, tmp_dir)
else: # Else, save model directly into local repo.
create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, 'README.md'))
self.save(tmp_dir, model_name=full_model_name, create_model_card=create_model_card)
            #Find files larger than 5 MB and track them with git-lfs
large_files = []
for root, dirs, files in os.walk(tmp_dir):
for filename in files:
file_path = os.path.join(root, filename)
rel_path = os.path.relpath(file_path, tmp_dir)
if os.path.getsize(file_path) > (5 * 1024 * 1024):
large_files.append(rel_path)
if len(large_files) > 0:
logging.info("Track files with git lfs: {}".format(", ".join(large_files)))
repo.lfs_track(large_files)
logging.info("Push model to the hub. This might take a while")
push_return = repo.push_to_hub(commit_message=commit_message)
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
try:
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
except:
pass
# Remove .git folder. On Windows, the .git folder might be read-only and cannot be deleted
# Hence, try to set write permissions on error
try:
for f in os.listdir(tmp_dir):
shutil.rmtree(os.path.join(tmp_dir, f), onerror=on_rm_error)
            except Exception as e:
                logging.warning("Error when deleting temp folder: {}".format(str(e)))
return push_return
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
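    # Usage sketch (illustrative, not part of the module): wiring the collate
    # function into a DataLoader, as fit() does below. `train_examples` is a
    # hypothetical list of InputExample-style objects with .texts and .label.
    #
    #   train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
    #   train_dataloader.collate_fn = model.smart_batching_collate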
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
        Helper function to get the length of the input text. Text can be either
        a list of ints (which means a single text as input), or a tuple of lists of ints
        (representing several text inputs to the model).
"""
if isinstance(text, dict): #{key: value} case
return len(next(iter(text.values())))
elif not hasattr(text, '__len__'): #Object has no len() method
return 1
elif len(text) == 0 or isinstance(text[0], int): #Empty string or list of ints
return len(text)
else:
return sum([len(t) for t in text]) #Sum of length of individual strings
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
#evaluator: SentenceEvaluator = None,
epochs: int = 10,
steps_per_epoch: int = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
gradient_accumulation: int = 1,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params: Dict[str, object] = None,
weight_decay: float = 0.01,
max_grad_norm: float = 1,
use_amp: bool = False,
#callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True,
checkpoint_save_folder: str = None,
#checkpoint_save_steps: int = 500,
#checkpoint_save_total_limit: int = 0,
accelerator: Accelerator = None,
tb: SummaryWriter =None,
round: int =0,
stage: int = 0
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
        :param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
        :param epochs: Number of epochs for training
        :param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal to the DataLoader size from train_objectives.
        :param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
        :param gradient_accumulation: Number of steps to take before gradient updates
        :param optimizer_class: Optimizer
        :param optimizer_params: Optimizer parameters
        :param weight_decay: Weight decay for model parameters
        :param max_grad_norm: Used for gradient clipping
        :param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
        :param show_progress_bar: If True, output a tqdm progress bar
        :param checkpoint_save_folder: Folder to save checkpoints during training
        :param accelerator: Allows you to pass your own accelerator object defined beforehand.
        :param tb: Optional TensorBoard SummaryWriter used to log the training loss
        :param round: Round index used to name the saved checkpoint
        :param stage: Stage index used to name the saved checkpoint
        """
# replacing mutable arguments
if optimizer_params is None:
optimizer_params = {'lr': 2e-5}
##Add info to model card
#info_loss_functions = "\n".join(["- {} with {} training examples".format(str(loss), len(dataloader)) for dataloader, loss in train_objectives])
info_loss_functions = []
for dataloader, loss in train_objectives:
info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
info_loss_functions = "\n\n".join([text for text in info_loss_functions])
info_fit_parameters = json.dumps({"epochs": epochs, "steps_per_epoch": steps_per_epoch, "scheduler": scheduler, "warmup_steps": warmup_steps, "optimizer_class": str(optimizer_class), "optimizer_params": optimizer_params, "weight_decay": weight_decay, "max_grad_norm": max_grad_norm }, indent=4, sort_keys=True)
self._model_card_text = None
self._model_card_vars['{TRAINING_SECTION}'] = ModelCardTemplate.__TRAINING_SECTION__.replace("{LOSS_FUNCTIONS}", info_loss_functions).replace("{FIT_PARAMETERS}", info_fit_parameters)
# accelerate setup
if accelerator is None:
accelerator = Accelerator()
# self.to(self._target_device)
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
dataloaders = [accelerator.prepare(dataloader) for dataloader in dataloaders]
device = accelerator.device
loss_models = [loss.to(device) for _, loss in train_objectives]# update
loss_models = [accelerator.prepare(loss_model) for loss_model in loss_models]
# for loss_model in loss_models:
# loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
optimizers = [accelerator.prepare(optimizer) for optimizer in optimizers]
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
            for _ in trange(math.ceil(steps_per_epoch / gradient_accumulation), desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for _ in range(gradient_accumulation):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
loss_value=loss_value/gradient_accumulation
scale_before_step = scaler.get_scale()
accelerator.backward(scaler.scale(loss_value))
#training_steps += 1
#if training_steps % gradient_accumulation == 0:
# scaler.unscale_(optimizer)
# torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
# scaler.step(optimizer)
# scaler.update()
# skip_scheduler = scaler.get_scale() != scale_before_step
# optimizer.zero_grad()
# if not skip_scheduler:
# scheduler.step()
# global_step += 1
else:
loss_value = loss_model(features, labels)
loss_value=loss_value/gradient_accumulation
accelerator.backward(loss_value)
#if training_steps % gradient_accumulation == 0:
# torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
# optimizer.step()
# optimizer.zero_grad()
# if not skip_scheduler:
# scheduler.step()
# global_step += 1
#training_steps += 1
if use_amp:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
else:
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
global_step += 1
dist.barrier()
if tb:
tb.add_scalar("loss",loss_value.item(),global_step)
dist.barrier()
#if evaluation_steps > 0 and global_step % evaluation_steps == 0:
# self._eval_during_training(evaluator, checkpoint_path, save_best_model, epoch, global_step, callback,
# main_process=accelerator.is_main_process)
#
# for loss_model in loss_models:
# loss_model.zero_grad()
# loss_model.train()
#if checkpoint_path is not None and checkpoint_save_steps is not None and checkpoint_save_steps > 0 \
# and global_step % checkpoint_save_steps == 0 and accelerator.is_main_process:
# self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
dist.barrier()
if accelerator.is_main_process:
self._save_checkpoint(checkpoint_save_folder, round,stage)
dist.barrier()
#self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback,
# main_process=accelerator.is_main_process)
#if accelerator.is_main_process:
# if evaluator is None and output_path is not None: #No evaluator, but output path: save final model version
# self.save(output_path)
# if checkpoint_path is not None:
#
#def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
# """
# Evaluate the model
#
# :param evaluator:
# the evaluator
# :param output_path:
# the evaluator can write the results to this path
# """
# if output_path is not None:
# os.makedirs(output_path, exist_ok=True)
# return evaluator(self, output_path)
#def _eval_during_training(self, evaluator, checkpoint_path, save_best_model, epoch, steps, callback, main_process=True):
# """Runs evaluation during the training"""
# if checkpoint_path is not None:
# os.makedirs(checkpoint_path, exist_ok=True)
#
# if evaluator is not None:
# #score = evaluator(self, output_path=eval_path, epoch=epoch, steps=steps)
# score=evaluator(self,spoch=epoch,steps=steps)
# if callback is not None and main_process:
# callback(score, epoch, steps)
# dist.barrier()
# if score > self.best_score and main_process:
# self.best_score = score
# if save_best_model:
# self.save(os.path.join(checkpoint_path,"best_model"))
# dist.barrier()
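    # Usage sketch (illustrative, not part of the module): a minimal fit() call
    # for this variant; the loss and hyper-parameters are hypothetical example
    # values. Checkpoints are written per round/stage by _save_checkpoint below.
    #
    #   from sentence_transformers import losses  # hypothetical import
    #   train_loss = losses.MultipleNegativesRankingLoss(model)
    #   model.fit(train_objectives=[(train_dataloader, train_loss)],
    #             epochs=1, warmup_steps=100, gradient_accumulation=1,
    #             checkpoint_save_folder='/tmp/ckpts', round=0, stage=0)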
    def _save_checkpoint(self, checkpoint_save_folder, round, stage):
        # Store new checkpoint
        if not os.path.exists(checkpoint_save_folder):
            os.mkdir(checkpoint_save_folder)
        checkpoint_save_path = os.path.join(checkpoint_save_folder, "round{}-stage{}".format(round, stage))
        self.save(checkpoint_save_path)
        map_path = os.path.join(checkpoint_save_folder, "checkpoint_path.tsv")
        with open(map_path, 'a') as f:
            f.write("{}\t{}\t{}\n".format(round, stage, checkpoint_save_path))
# Delete old checkpoints
#if checkpoint_save_total_limit is not None and checkpoint_save_total_limit > 0:
# old_checkpoints = []
# for subdir in os.listdir(checkpoint_path):
# if subdir.isdigit():
# old_checkpoints.append({'step': int(subdir), 'path': os.path.join(checkpoint_path, subdir)})
# if len(old_checkpoints) > checkpoint_save_total_limit:
# old_checkpoints = sorted(old_checkpoints, key=lambda x: x['step'])
# shutil.rmtree(old_checkpoints[0]['path'])
def _load_auto_model(self, model_name_or_path, **auto_model_kwargs):
"""
Creates a simple Transformer + Mean Pooling model and returns the modules
"""
logging.warning("No sentence-transformers model found with name {}. Creating a new one with MEAN pooling.".format(model_name_or_path))
transformer_model = Transformer(model_name_or_path, **auto_model_kwargs)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension(), 'mean')
return [transformer_model, pooling_model]
def _load_sbert_model(self, model_path):
"""
Loads a full sentence-transformers model
"""
# Check if the config_sentence_transformers.json file exists (exists since v2 of the framework)
config_sentence_transformers_json_path = os.path.join(model_path, 'config_sentence_transformers.json')
if os.path.exists(config_sentence_transformers_json_path):
with open(config_sentence_transformers_json_path) as fIn:
self._model_config = json.load(fIn)
if '__version__' in self._model_config and 'sentence_transformers' in self._model_config['__version__'] and self._model_config['__version__']['sentence_transformers'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(self._model_config['__version__']['sentence_transformers'], __version__))
# Check if a readme exists
model_card_path = os.path.join(model_path, 'README.md')
if os.path.exists(model_card_path):
try:
with open(model_card_path, encoding='utf8') as fIn:
self._model_card_text = fIn.read()
except:
pass
# Load the modules of sentence transformer
modules_json_path = os.path.join(model_path, 'modules.json')
with open(modules_json_path) as fIn:
modules_config = json.load(fIn)
modules = OrderedDict()
for module_config in modules_config:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
return modules
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
        Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
test_parallel_backend.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
"""
Tests the parallel backend
"""
import threading
import multiprocessing
import random
import os
import sys
import subprocess
import numpy as np
from numba import config, utils
from numba import unittest_support as unittest
from numba import jit, vectorize, guvectorize
from .support import temp_directory, override_config, TestCase, tag
from .test_parfors import skip_unsupported as parfors_skip_unsupported
from .test_parfors import linux_only
from numba.six.moves import queue as t_queue
if utils.PYVERSION >= (3, 0):
import faulthandler
# Check which backends are available
# TODO: Put this in a subprocess so the address space is kept clean
try:
from numba.npyufunc import tbbpool # noqa: F401
_HAVE_TBB_POOL = True
except ImportError:
_HAVE_TBB_POOL = False
try:
from numba.npyufunc import omppool
_HAVE_OMP_POOL = True
except ImportError:
_HAVE_OMP_POOL = False
try:
import scipy.linalg.cython_lapack # noqa: F401
_HAVE_LAPACK = True
except ImportError:
_HAVE_LAPACK = False
# test skipping decorators
skip_no_omp = unittest.skipUnless(_HAVE_OMP_POOL, "OpenMP threadpool required")
skip_no_tbb = unittest.skipUnless(_HAVE_TBB_POOL, "TBB threadpool required")
_gnuomp = _HAVE_OMP_POOL and omppool.openmp_vendor == "GNU"
skip_unless_gnu_omp = unittest.skipUnless(_gnuomp, "GNU OpenMP only tests")
skip_unless_py3 = unittest.skipUnless(utils.PYVERSION >= (3, 0),
"Test runs on Python 3 only")
_windows = sys.platform.startswith('win')
_osx = sys.platform.startswith('darwin')
_windows_py27 = (sys.platform.startswith('win32') and
sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_parfors_unsupported = _32bit or _windows_py27
_HAVE_OS_FORK = not _windows
# some functions to jit
def foo(n, v):
return np.ones(n) + v
if _HAVE_LAPACK:
def linalg(n, v):
x = np.dot(np.ones((n, n)), np.ones((n, n)))
return x + np.arange(n) + v
else:
def linalg(n, v):
# no way to trigger MKL without the lapack bindings.
return np.arange(n) + v
def ufunc_foo(a, b):
return a + b
def gufunc_foo(a, b, out):
out[0] = a + b
class runnable(object):
def __init__(self, **options):
self._options = options
class jit_runner(runnable):
def __call__(self):
cfunc = jit(**self._options)(foo)
a = 4
b = 10
expected = foo(a, b)
got = cfunc(a, b)
np.testing.assert_allclose(expected, got)
class linalg_runner(runnable):
def __call__(self):
cfunc = jit(**self._options)(linalg)
a = 4
b = 10
expected = linalg(a, b)
got = cfunc(a, b)
np.testing.assert_allclose(expected, got)
class vectorize_runner(runnable):
def __call__(self):
cfunc = vectorize(['(f4, f4)'], **self._options)(ufunc_foo)
a = b = np.random.random(10).astype(np.float32)
expected = ufunc_foo(a, b)
got = cfunc(a, b)
np.testing.assert_allclose(expected, got)
class guvectorize_runner(runnable):
def __call__(self):
sig = ['(f4, f4, f4[:])']
cfunc = guvectorize(sig, '(),()->()', **self._options)(gufunc_foo)
a = b = np.random.random(10).astype(np.float32)
expected = ufunc_foo(a, b)
got = cfunc(a, b)
np.testing.assert_allclose(expected, got)
def chooser(fnlist, **kwargs):
q = kwargs.get('queue')
try:
if utils.PYVERSION >= (3, 0):
faulthandler.enable()
for _ in range(int(len(fnlist) * 1.5)):
fn = random.choice(fnlist)
fn()
except Exception as e:
q.put(e)
def compile_factory(parallel_class, queue_impl):
def run_compile(fnlist):
q = queue_impl()
kws = {'queue': q}
ths = [parallel_class(target=chooser, args=(fnlist,), kwargs=kws)
for i in range(4)]
for th in ths:
th.start()
for th in ths:
th.join()
if not q.empty():
errors = []
while not q.empty():
errors.append(q.get(False))
_msg = "Error(s) occurred in delegated runner:\n%s"
raise RuntimeError(_msg % '\n'.join([repr(x) for x in errors]))
return run_compile
# workers
_thread_class = threading.Thread
class _proc_class_impl(object):
def __init__(self, method):
self._method = method
def __call__(self, *args, **kwargs):
if utils.PYVERSION < (3, 0):
return multiprocessing.Process(*args, **kwargs)
else:
ctx = multiprocessing.get_context(self._method)
return ctx.Process(*args, **kwargs)
def _get_mp_classes(method):
if utils.PYVERSION < (3, 0):
proc = _proc_class_impl(method)
queue = multiprocessing.Queue
else:
if method == 'default':
method = None
ctx = multiprocessing.get_context(method)
proc = _proc_class_impl(method)
queue = ctx.Queue
return proc, queue
thread_impl = compile_factory(_thread_class, t_queue.Queue)
spawn_proc_impl = compile_factory(*_get_mp_classes('spawn'))
if not _windows:
fork_proc_impl = compile_factory(*_get_mp_classes('fork'))
forkserver_proc_impl = compile_factory(*_get_mp_classes('forkserver'))
# On Py27 this duplicates one of the above (linux uses fork, windows uses spawn);
# it is kept separate so that when tests fail it's less confusing!
default_proc_impl = compile_factory(*_get_mp_classes('default'))
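# Illustrative sketch (not part of the test module): each *_impl above takes a
# list of runner callables and races them across four workers, e.g.
#
#   thread_impl([jit_runner(nopython=True), vectorize_runner(nopython=True)])
#
# and raises a RuntimeError aggregating any exceptions the workers hit.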
class TestParallelBackendBase(TestCase):
"""
Base class for testing the parallel backends
"""
all_impls = [
jit_runner(nopython=True),
jit_runner(nopython=True, cache=True),
jit_runner(nopython=True, nogil=True),
linalg_runner(nopython=True),
linalg_runner(nopython=True, nogil=True),
vectorize_runner(nopython=True),
vectorize_runner(nopython=True, target='parallel'),
vectorize_runner(nopython=True, target='parallel', cache=True),
guvectorize_runner(nopython=True),
guvectorize_runner(nopython=True, target='parallel'),
guvectorize_runner(nopython=True, target='parallel', cache=True),
]
if not _parfors_unsupported:
parfor_impls = [
jit_runner(nopython=True, parallel=True),
jit_runner(nopython=True, parallel=True, cache=True),
linalg_runner(nopython=True, parallel=True),
linalg_runner(nopython=True, parallel=True, cache=True),
]
all_impls.extend(parfor_impls)
parallelism = ['threading', 'random']
if utils.PYVERSION > (3, 0):
parallelism.append('multiprocessing_spawn')
if _HAVE_OS_FORK:
parallelism.append('multiprocessing_fork')
parallelism.append('multiprocessing_forkserver')
else:
parallelism.append('multiprocessing_default')
runners = {
'concurrent_jit': [
jit_runner(nopython=True, parallel=(not _parfors_unsupported)),
],
        'concurrent_vectorize': [
vectorize_runner(nopython=True, target='parallel'),
],
'concurrent_guvectorize': [
guvectorize_runner(nopython=True, target='parallel'),
],
'concurrent_mix_use': all_impls,
}
safe_backends = {'omp', 'tbb'}
def run_compile(self, fnlist, parallelism='threading'):
self._cache_dir = temp_directory(self.__class__.__name__)
with override_config('CACHE_DIR', self._cache_dir):
if parallelism == 'threading':
thread_impl(fnlist)
elif parallelism == 'multiprocessing_fork':
fork_proc_impl(fnlist)
elif parallelism == 'multiprocessing_forkserver':
forkserver_proc_impl(fnlist)
elif parallelism == 'multiprocessing_spawn':
spawn_proc_impl(fnlist)
elif parallelism == 'multiprocessing_default':
default_proc_impl(fnlist)
elif parallelism == 'random':
if utils.PYVERSION < (3, 0):
ps = [thread_impl, default_proc_impl]
else:
ps = [thread_impl, spawn_proc_impl]
if _HAVE_OS_FORK:
ps.append(fork_proc_impl)
ps.append(forkserver_proc_impl)
random.shuffle(ps)
for impl in ps:
impl(fnlist)
else:
raise ValueError(
'Unknown parallelism supplied %s' % parallelism)
_specific_backends = config.THREADING_LAYER in ('omp', 'tbb', 'workqueue')
@unittest.skipUnless(_specific_backends, "Threading layer not explicit")
class TestParallelBackend(TestParallelBackendBase):
""" These are like the numba.tests.test_threadsafety tests but designed
instead to torture the parallel backend.
If a suitable backend is supplied via NUMBA_THREADING_LAYER these tests
can be run directly. This test class cannot be run using the multiprocessing
option to the test runner (i.e. `./runtests -m`) as daemon processes cannot
have children.
"""
    # NOTE: All tests are generated based on the concurrent execution support the
    # platform offers from Python, irrespective of whether the native libraries
    # can actually handle the behaviour present.
@classmethod
def generate(cls):
for p in cls.parallelism:
for name, impl in cls.runners.items():
methname = "test_" + p + '_' + name
def methgen(impl, p):
def test_method(self):
selfproc = multiprocessing.current_process()
# daemonized processes cannot have children
if selfproc.daemon:
_msg = 'daemonized processes cannot have children'
self.skipTest(_msg)
else:
self.run_compile(impl, parallelism=p)
return test_method
fn = methgen(impl, p)
fn.__name__ = methname
setattr(cls, methname, fn)
TestParallelBackend.generate()
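# Illustrative note (not part of the test module): generate() synthesizes one
# test method per (parallelism, runner) pair, e.g. test_threading_concurrent_jit
# or test_multiprocessing_spawn_concurrent_mix_use, so each combination can be
# selected individually from the test runner.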
class TestSpecificBackend(TestParallelBackendBase):
"""
    This is quite contrived: for each test in the TestParallelBackend tests it
    generates a test that will run the TestParallelBackend test in a new python
    process with an environment modified to ensure a specific threadsafe backend
    is used. This is with a view to testing the backends independently and in an
    isolated manner such that if they hang/crash/have issues, it doesn't kill
    the test suite.
"""
_DEBUG = False
backends = {'tbb': skip_no_tbb,
'omp': skip_no_omp,
'workqueue': unittest.skipIf(False, '')}
def run_cmd(self, cmdline, env):
popen = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
# finish in 5 minutes or kill it
timeout = threading.Timer(5 * 60., popen.kill)
try:
timeout.start()
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
return out.decode(), err.decode()
finally:
timeout.cancel()
def run_test_in_separate_process(self, test, threading_layer):
env_copy = os.environ.copy()
env_copy['NUMBA_THREADING_LAYER'] = str(threading_layer)
cmdline = [sys.executable, "-m", "numba.runtests", test]
return self.run_cmd(cmdline, env_copy)
@classmethod
def _inject(cls, p, name, backend, backend_guard):
themod = cls.__module__
thecls = TestParallelBackend.__name__
methname = "test_" + p + '_' + name
injected_method = '%s.%s.%s' % (themod, thecls, methname)
def test_template(self):
o, e = self.run_test_in_separate_process(injected_method, backend)
if self._DEBUG:
print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
self.assertIn('OK', e)
self.assertTrue('FAIL' not in e)
self.assertTrue('ERROR' not in e)
injected_test = "test_%s_%s_%s" % (p, name, backend)
# Mark as long_running
setattr(cls, injected_test,
tag('long_running')(backend_guard(test_template)))
@classmethod
def generate(cls):
for backend, backend_guard in cls.backends.items():
for p in cls.parallelism:
for name in cls.runners.keys():
# handle known problem cases...
# GNU OpenMP is not fork safe
if (p in ('multiprocessing_fork', 'random') and
backend == 'omp' and
sys.platform.startswith('linux')):
continue
# workqueue is not thread safe
if (p in ('threading', 'random') and
backend == 'workqueue'):
continue
cls._inject(p, name, backend, backend_guard)
TestSpecificBackend.generate()
class ThreadLayerTestHelper(TestCase):
"""
Helper class for running an isolated piece of code based on a template
"""
# sys path injection and separate usecase module to make sure everything
# is importable by children of multiprocessing
_here = "%r" % os.path.dirname(__file__)
template = """if 1:
import sys
sys.path.insert(0, "%(here)r")
import multiprocessing
import numpy as np
from numba import njit
import numba
try:
import threading_backend_usecases
except ImportError as e:
print("DEBUG:", sys.path)
raise e
import os
sigterm_handler = threading_backend_usecases.sigterm_handler
busy_func = threading_backend_usecases.busy_func
def the_test():
%%s
if __name__ == "__main__":
the_test()
""" % {'here': _here}
def run_cmd(self, cmdline, env=None):
if env is None:
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = str("omp")
popen = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
# finish in 5 minutes or kill it
timeout = threading.Timer(5 * 60., popen.kill)
try:
timeout.start()
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
finally:
timeout.cancel()
return out.decode(), err.decode()
@parfors_skip_unsupported
class TestThreadingLayerSelection(ThreadLayerTestHelper):
"""
Checks that numba.threading_layer() reports correctly.
"""
_DEBUG = False
backends = {'tbb': skip_no_tbb,
'omp': skip_no_omp,
'workqueue': unittest.skipIf(False, '')}
@classmethod
def _inject(cls, backend, backend_guard):
def test_template(self):
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
Z = busy_func(X, Y)
assert numba.threading_layer() == '%s'
"""
runme = self.template % (body % backend)
cmdline = [sys.executable, '-c', runme]
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = str(backend)
out, err = self.run_cmd(cmdline, env=env)
if self._DEBUG:
print(out, err)
injected_test = "test_threading_layer_selector_%s" % backend
setattr(cls, injected_test,
tag("important")(backend_guard(test_template)))
@classmethod
def generate(cls):
for backend, backend_guard in cls.backends.items():
cls._inject(backend, backend_guard)
TestThreadingLayerSelection.generate()
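# Illustrative note (not part of the test module): each injected test executes
# the template body in a child interpreter with NUMBA_THREADING_LAYER preset and
# asserts numba.threading_layer() reports that backend. A minimal manual
# equivalent, with `runme` built from the template above and an example backend:
#
#   env = os.environ.copy()
#   env['NUMBA_THREADING_LAYER'] = 'tbb'
#   subprocess.check_call([sys.executable, '-c', runme], env=env)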
@parfors_skip_unsupported
@skip_unless_py3
class TestMiscBackendIssues(ThreadLayerTestHelper):
"""
Checks fixes for the issues with threading backends implementation
"""
_DEBUG = False
@skip_no_omp
def test_omp_stack_overflow(self):
"""
Tests that OMP does not overflow stack
"""
runme = """if 1:
from numba import vectorize, threading_layer
import numpy as np
@vectorize(['f4(f4,f4,f4,f4,f4,f4,f4,f4)'], target='parallel')
def foo(a, b, c, d, e, f, g, h):
return a+b+c+d+e+f+g+h
x = np.ones(2**20, np.float32)
foo(*([x]*8))
print("@%s@" % threading_layer())
"""
cmdline = [sys.executable, '-c', runme]
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = "omp"
env['OMP_STACKSIZE'] = "100K"
out, err = self.run_cmd(cmdline, env=env)
if self._DEBUG:
print(out, err)
self.assertIn("@omp@", out)
@skip_no_tbb
def test_single_thread_tbb(self):
"""
Tests that TBB works well with single thread
https://github.com/numba/numba/issues/3440
"""
runme = """if 1:
from numba import njit, prange, threading_layer
@njit(parallel=True)
def foo(n):
acc = 0
for i in prange(n):
acc += i
return acc
foo(100)
print("@%s@" % threading_layer())
"""
cmdline = [sys.executable, '-c', runme]
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = "tbb"
env['NUMBA_NUM_THREADS'] = "1"
out, err = self.run_cmd(cmdline, env=env)
if self._DEBUG:
print(out, err)
self.assertIn("@tbb@", out)
# 32bit or windows py27 (not that this runs on windows)
@parfors_skip_unsupported
@skip_unless_gnu_omp
class TestForkSafetyIssues(ThreadLayerTestHelper):
"""
Checks Numba's behaviour in various situations involving GNU OpenMP and fork
"""
_DEBUG = False
def test_check_threading_layer_is_gnu(self):
runme = """if 1:
from numba.npyufunc import omppool
assert omppool.openmp_vendor == 'GNU'
"""
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
def test_par_parent_os_fork_par_child(self):
"""
        Whilst normally valid, this actually isn't for Numba due to an OpenMP
        invariant. Checks SIGABRT is received.
"""
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
Z = busy_func(X, Y)
pid = os.fork()
if pid == 0:
Z = busy_func(X, Y)
else:
os.wait()
"""
runme = self.template % body
cmdline = [sys.executable, '-c', runme]
try:
out, err = self.run_cmd(cmdline)
except AssertionError as e:
self.assertIn("failed with code -6", str(e))
def test_par_parent_implicit_mp_fork_par_child(self):
"""
Implicit use of multiprocessing fork context.
Does this:
1. Start with OpenMP
2. Fork to processes using OpenMP (this is invalid)
3. Joins fork
4. Check the exception pushed onto the queue that is a result of
catching SIGTERM coming from the C++ aborting on illegal fork
pattern for GNU OpenMP
"""
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
q = multiprocessing.Queue()
# Start OpenMP runtime on parent via parallel function
Z = busy_func(X, Y, q)
# fork() underneath with no exec, will abort
proc = multiprocessing.Process(target = busy_func, args=(X, Y, q))
proc.start()
err = q.get()
assert "Caught SIGTERM" in str(err)
"""
runme = self.template % body
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
if self._DEBUG:
print(out, err)
@linux_only
@skip_unless_py3
def test_par_parent_explicit_mp_fork_par_child(self):
"""
Explicit use of multiprocessing fork context.
Does this:
1. Start with OpenMP
2. Fork to processes using OpenMP (this is invalid)
3. Joins fork
4. Check the exception pushed onto the queue that is a result of
catching SIGTERM coming from the C++ aborting on illegal fork
pattern for GNU OpenMP
"""
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
q = multiprocessing.Queue()
# Start OpenMP runtime on parent via parallel function
Z = busy_func(X, Y, q)
# fork() underneath with no exec, will abort
ctx = multiprocessing.get_context('fork')
proc = ctx.Process(target = busy_func, args=(X, Y, q))
proc.start()
proc.join()
err = q.get()
assert "Caught SIGTERM" in str(err)
"""
runme = self.template % body
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
if self._DEBUG:
print(out, err)
@skip_unless_py3
def test_par_parent_mp_spawn_par_child_par_parent(self):
"""
Explicit use of multiprocessing spawn, this is safe.
Does this:
1. Start with OpenMP
2. Spawn to processes using OpenMP
3. Join spawns
4. Run some more OpenMP
"""
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
q = multiprocessing.Queue()
# Start OpenMP runtime and run on parent via parallel function
Z = busy_func(X, Y, q)
procs = []
ctx = multiprocessing.get_context('spawn')
for x in range(20): # start a lot to try and get overlap
## fork() + exec() to run some OpenMP on children
proc = ctx.Process(target = busy_func, args=(X, Y, q))
procs.append(proc)
sys.stdout.flush()
sys.stderr.flush()
proc.start()
[p.join() for p in procs]
try:
q.get(False)
except multiprocessing.queues.Empty:
pass
else:
raise RuntimeError("Queue was not empty")
# Run some more OpenMP on parent
Z = busy_func(X, Y, q)
"""
runme = self.template % body
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
if self._DEBUG:
print(out, err)
def test_serial_parent_implicit_mp_fork_par_child_then_par_parent(self):
"""
Implicit use of multiprocessing (will be fork, but cannot declare that
in Py2.7 as there's no process launch context).
Does this:
1. Start with no OpenMP
2. Fork to processes using OpenMP
3. Join forks
4. Run some OpenMP
"""
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
q = multiprocessing.Queue()
# this is ok
procs = []
for x in range(10):
# fork() underneath with but no OpenMP in parent, this is ok
proc = multiprocessing.Process(target = busy_func,
args=(X, Y, q))
procs.append(proc)
proc.start()
[p.join() for p in procs]
# and this is still ok as the OpenMP happened in forks
Z = busy_func(X, Y, q)
try:
q.get(False)
except multiprocessing.queues.Empty:
pass
else:
raise RuntimeError("Queue was not empty")
"""
runme = self.template % body
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
if self._DEBUG:
print(out, err)
@linux_only
@skip_unless_py3
def test_serial_parent_explicit_mp_fork_par_child_then_par_parent(self):
"""
Explicit use of multiprocessing 'fork'.
Does this:
1. Start with no OpenMP
2. Fork to processes using OpenMP
3. Join forks
4. Run some OpenMP
"""
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
q = multiprocessing.Queue()
# this is ok
procs = []
ctx = multiprocessing.get_context('fork')
for x in range(10):
# fork() underneath with but no OpenMP in parent, this is ok
proc = ctx.Process(target = busy_func, args=(X, Y, q))
procs.append(proc)
proc.start()
[p.join() for p in procs]
# and this is still ok as the OpenMP happened in forks
Z = busy_func(X, Y, q)
try:
q.get(False)
except multiprocessing.queues.Empty:
pass
else:
raise RuntimeError("Queue was not empty")
"""
runme = self.template % body
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
if self._DEBUG:
print(out, err)
@parfors_skip_unsupported
class TestInitSafetyIssues(TestCase):
_DEBUG = False
@linux_only # only linux can leak semaphores
@skip_unless_py3 # need multiprocessing.get_context to obtain spawn on linux
def test_orphaned_semaphore(self):
# sys path injection and separate usecase module to make sure everything
# is importable by children of multiprocessing
def run_cmd(cmdline):
popen = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
# finish in 5 minutes or kill it
timeout = threading.Timer(5 * 60., popen.kill)
try:
timeout.start()
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
finally:
timeout.cancel()
return out.decode(), err.decode()
test_file = os.path.join(os.path.dirname(__file__),
"orphaned_semaphore_usecase.py")
cmdline = [sys.executable, test_file]
out, err = run_cmd(cmdline)
# assert no semaphore leaks reported on stderr
self.assertNotIn("leaked semaphore", err)
if self._DEBUG:
print("OUT:", out)
print("ERR:", err)
if __name__ == '__main__':
unittest.main()
|
bulk_operation.py
|
# coding=utf8
"""
@author:Administrator
@file: bulk_operation.py
@time: 2018/08/27
Simpler batch operations for the three major databases (MongoDB, Elasticsearch, Redis): discrete tasks arriving within a time window are automatically aggregated into batch tasks, sparing you manual array slicing.
"""
import atexit
import re
from elasticsearch import helpers
from threading import Thread
from typing import Union
import abc
import time
from queue import Queue, Empty
import unittest
# noinspection PyUnresolvedReferences
from pymongo import UpdateOne, InsertOne, UpdateMany, collection, MongoClient
import redis
from function_scheduling_distributed_framework.utils.time_util import DatetimeConverter
from function_scheduling_distributed_framework.utils import LoggerMixin, decorators, RedisMixin
class RedisOperation:
"""redis的操作,此类作用主要是规范下格式而已"""
def __init__(self, operation_name: str, key: str, value: str):
"""
        :param operation_name: name of the redis operation, e.g. sadd, lpush
        :param key: the redis key
        :param value: the value of the redis key
"""
self.operation_name = operation_name
self.key = key
self.value = value
class BaseBulkHelper(LoggerMixin, metaclass=abc.ABCMeta):
"""批量操纵抽象基类"""
bulk_helper_map = {}
def __new__(cls, base_object, *args, **kwargs):
        if str(base_object) not in cls.bulk_helper_map:  # str() because some instance types are unhashable and cannot be used as dict keys
self = super().__new__(cls)
return self
else:
return cls.bulk_helper_map[str(base_object)]
def __init__(self, base_object: Union[collection.Collection, redis.Redis], threshold: int = 100, max_time_interval=10, is_print_log: bool = True):
if str(base_object) not in self.bulk_helper_map:
self._custom_init(base_object, threshold, max_time_interval, is_print_log)
self.bulk_helper_map[str(base_object)] = self
def _custom_init(self, base_object, threshold, max_time_interval, is_print_log):
self.base_object = base_object
self._threshold = threshold
self._max_time_interval = max_time_interval
self._is_print_log = is_print_log
self._to_be_request_queue = Queue(threshold * 2)
self._current_time = time.time()
self._last_has_task_time = time.time()
        atexit.register(self.__do_something_before_exit)  # the registered function runs automatically before the program exits
self._main_thread_has_exit = False
        Thread(target=self.__execute_bulk_operation_in_other_thread).start()
        Thread(target=self.__check_queue_size).start()
        # self.__execute_bulk_operation_in_other_thread()
        self.logger.debug(f'{self.__class__} instantiated')
def add_task(self, base_operation: Union[UpdateOne, InsertOne, RedisOperation, tuple, dict]):
"""添加单个需要执行的操作,程序自动聚合陈批次操作"""
self._to_be_request_queue.put(base_operation)
# @decorators.tomorrow_threads(100)
    @decorators.keep_circulating(1)  # auto-recover from redis or network exceptions.
    def __execute_bulk_operation_in_other_thread(self):
while True:
if self._to_be_request_queue.qsize() >= self._threshold or time.time() > self._current_time + self._max_time_interval:
self._do_bulk_operation()
if self._main_thread_has_exit and self._to_be_request_queue.qsize() == 0:
pass
# break
time.sleep(10 ** -2)
@decorators.keep_circulating(60)
def __check_queue_size(self):
if self._to_be_request_queue.qsize() > 0:
self._last_has_task_time = time.time()
if time.time() - self._last_has_task_time > 60:
            self.logger.info(f'{self.base_object} last had pending tasks at: {DatetimeConverter(self._last_has_task_time)}')
@abc.abstractmethod
def _do_bulk_operation(self):
raise NotImplementedError
def __do_something_before_exit(self):
self._main_thread_has_exit = True
        self.logger.critical(f'Running remaining tasks for [{str(self.base_object)}] before the program exits')
class MongoBulkWriteHelper(BaseBulkHelper):
"""
    A simpler bulk insert: operations can be submitted one at a time and are automatically
    aggregated into batches before insertion, which is many times faster.
"""
def _do_bulk_operation(self):
if self._to_be_request_queue.qsize() > 0:
t_start = time.time()
count = 0
request_list = []
for _ in range(self._threshold):
try:
request = self._to_be_request_queue.get_nowait()
count += 1
request_list.append(request)
except Empty:
pass
if request_list:
self.base_object.bulk_write(request_list, ordered=False)
if self._is_print_log:
mongo_col_str = re.sub(r"document_class=dict, tz_aware=False, connect=True\),", "", str(self.base_object))
                    self.logger.info(f'[{mongo_col_str}] bulk-inserted {count} tasks in {round(time.time() - t_start, 6)} seconds')
self._current_time = time.time()
class ElasticBulkHelper(BaseBulkHelper):
"""
    Bulk insert for Elasticsearch.
"""
def _do_bulk_operation(self):
if self._to_be_request_queue.qsize() > 0:
t_start = time.time()
count = 0
request_list = []
for _ in range(self._threshold):
try:
request = self._to_be_request_queue.get_nowait()
count += 1
request_list.append(request)
except Empty:
pass
if request_list:
# self.base_object.bulk_write(request_list, ordered=False)
helpers.bulk(self.base_object, request_list)
if self._is_print_log:
                    self.logger.info(f'[{self.base_object}] bulk-inserted {count} tasks in {round(time.time() - t_start, 6)} seconds')
self._current_time = time.time()
class RedisBulkWriteHelper(BaseBulkHelper):
"""redis批量插入,比自带的更方便操作非整除批次"""
def _do_bulk_operation(self):
if self._to_be_request_queue.qsize() > 0:
t_start = time.time()
count = 0
pipeline = self.base_object.pipeline() # type: redis.client.Pipeline
for _ in range(self._threshold):
try:
request = self._to_be_request_queue.get_nowait()
count += 1
except Empty:
pass
else:
getattr(pipeline, request.operation_name)(request.key, request.value)
pipeline.execute()
pipeline.reset()
if self._is_print_log:
                self.logger.info(f'[{str(self.base_object)}] bulk-inserted {count} tasks in {round(time.time() - t_start, 6)} seconds')
self._current_time = time.time()
def _do_bulk_operation2(self):
if self._to_be_request_queue.qsize() > 0:
t_start = time.time()
count = 0
with self.base_object.pipeline() as pipeline: # type: redis.client.Pipeline
for _ in range(self._threshold):
try:
request = self._to_be_request_queue.get_nowait()
count += 1
except Empty:
pass
else:
getattr(pipeline, request.operation_name)(request.key, request.value)
pipeline.execute()
if self._is_print_log:
                self.logger.info(f'[{str(self.base_object)}] bulk-inserted {count} tasks in {round(time.time() - t_start, 6)} seconds')
self._current_time = time.time()
# noinspection SpellCheckingInspection,PyMethodMayBeStatic
class _Test(unittest.TestCase, LoggerMixin):
@unittest.skip
def test_mongo_bulk_write(self):
# col = MongoMixin().mongo_16_client.get_database('test').get_collection('ydf_test2')
col = MongoClient().get_database('test').get_collection('ydf_test2')
with decorators.TimerContextManager():
for i in range(50000 + 13):
# time.sleep(0.01)
item = {'_id': i, 'field1': i * 2}
mongo_helper = MongoBulkWriteHelper(col, 10000, is_print_log=True)
mongo_helper.add_task(UpdateOne({'_id': item['_id']}, {'$set': item}, upsert=True))
# @unittest.skip
def test_redis_bulk_write(self):
with decorators.TimerContextManager():
# r = redis.Redis(password='123456')
r = RedisMixin().redis_db0
redis_helper = RedisBulkWriteHelper(r, 200)
            # redis_helper = RedisBulkWriteHelper(r, 100)  # creating it outside the loop works too
for i in range(1003):
# time.sleep(0.2)
                # instantiating the helper here repeatedly is also fine (the cached instance is returned)
redis_helper.add_task(RedisOperation('sadd', 'key1', str(i)))
if __name__ == '__main__':
unittest.main()
|
EventMsgWorker.py
|
#! /usr/bin/python
# ArtCM OMQS Event Worker Class
#
# (C) ArtCM 2015
#
# Date: 2015.9.22
__author__ = 'dennis'
from multiprocessing import Queue
import threading
import os, signal, time, logging, platform
import pika
from pika.exceptions import *
from ConfigReader import ConfigReader
from MsgWorker import MsgWorker
from AsyncPublisher import OMQSAsyncPublisher
from OMQSExceptions import *
from OMQSLogManager import OMQSLogManager
class EventMsgWorker(MsgWorker):
EVENT_KEY = 'omqs.key.event'
def __init__(self, q, name='EventMsgWorker'):
super(EventMsgWorker, self).__init__(q, name)
self._MQPublisher = None
self._MQthread = None
manager = OMQSLogManager(name=__name__, file_name=name)
self._logger = manager.logger
def worker_will_run(self):
try:
if self._MQPublisher:
self._MQPublisher.stop()
self._MQPublisher = OMQSAsyncPublisher(exchange_name='omqs.exchange.event',
queue_name='omqs.queue.event',
exchange_durable=True,
queue_durable=True,
confirm=False)
if self._MQPublisher:
self._MQthread = threading.Thread(target=self._MQPublisher.run, name='PublishingThread')
self._MQthread.start()
while not self._MQPublisher.is_ready():
time.sleep(0.1)
self._logger.info('[%s][%d] ready to run!', self.name, self.pid)
else:
raise InvalidPublisherError
except Exception, e:
self._logger.error('[%s][%d] error: %s. Stopping the publisher', self.name, self.pid, str(e))
if self._MQPublisher:
self._MQPublisher.stop()
self._MQPublisher = None
def msg_did_receive(self, msg):
#print 'msg = ', msg
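        # Messages are expected as '<routing_key>:<body>' (see main() below);
        # an empty routing key falls back to EventMsgWorker.EVENT_KEY.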
try:
if self._MQPublisher:
index = msg.find(':')
key = msg[0:index]
if key == '' or not key:
key = EventMsgWorker.EVENT_KEY
body = msg[index+1:]
self._MQPublisher.publish_message(body, key)
except Exception, e:
self._logger.error('[%s][%d] error when publishing: %s', self.name, self.pid, str(e))
            # back up the log message if anything is wrong with the MQ connection
backup_file = open('./log/backup.evt', 'a')
backup_file.write(msg+'\n')
backup_file.close()
# TODO here:
# Do particular task for special work, e.g: sending txt msg or email for login
def worker_will_stop(self):
if self._MQPublisher:
print 'worker will quit!!'
self._MQPublisher.stop()
self._MQthread.join()
self._MQthread = None
def main():
q = Queue()
worker = EventMsgWorker(q)
worker.start()
t1 = time.time()
for i in range(1, 100):
msg = 'omqs.key.event:%d' % i
q.put(msg)
time.sleep(0.1)
t2 = time.time()
    dt = t2 - t1
print 'total time: %f' % dt
time.sleep(3)
worker.terminate()
worker.join()
# Just for Test
if __name__ == '__main__':
main()
|
threadlatch.py
|
"""
A CountDownLatch-style class modelled on Java's (for multithreaded code)
REFERENCES::
https://stackoverflow.com/questions/10236947/does-python-have-a-similar-control-mechanism-to-javas-countdownlatch
https://qiita.com/shinkiro/items/75f3561d6bc96694ce30
"""
import threading as th
import typing as ty
class CountDownLatch:
"""\
    A Java-style CountDownLatch class.
    Usage mirrors the Java version via the following methods:
    - count_down
    - await_
    Each worker thread calls count_down to decrement the count.
    A waiting thread blocks in await_ until the count reaches zero.
    This class is intended for multithreaded code.
    >>> # ---------------------------------------------
    >>> # Using the latch from threads
    >>> # ---------------------------------------------
>>> import threading as th
>>> import time as tm
>>> from trypython.advanced.threadlatch import CountDownLatch
>>> latch = CountDownLatch(2)
>>> th1 = th.Thread(target=lambda: latch.count_down())
>>> th2 = th.Thread(target=lambda: latch.count_down())
>>> latch.count
2
>>> latch.await_(timeout=1.0)
False
>>> th1.start()
>>> tm.sleep(1)
>>> latch.count
1
>>> latch.await_(timeout=1.0)
False
>>> th2.start()
>>> tm.sleep(1)
>>> latch.count
0
>>> latch.await_(timeout=1.0)
True
"""
def __init__(self, count: int = 1, condition: ty.Optional[th.Condition] = None):
"""
        Initializes the object.
        :param count: Initial count. Defaults to 1. Values of 0 or less are rejected (ValueError).
        :param condition: Optional Condition object to synchronize on; a new one is created when omitted.
"""
if count <= 0:
            raise ValueError(f'count must be greater than 0: [{count}]')
self._count = count
self.lock = condition if condition else th.Condition()
@property
def count(self):
"""
        Gets the current count.
        :return: the current count
>>> import trypython.advanced.multiprocesslatch as mp_latch
>>> latch = mp_latch.CountDownLatch(5)
>>> latch.count
5
>>> latch.count_down()
>>> latch.count
4
>>> latch.count_down()
>>> latch.count_down()
>>> latch.count_down()
>>> latch.count
1
>>> latch.count_down()
>>> latch.count
0
>>> latch.count_down()
>>> latch.count
0
"""
return self._count
def count_down(self):
"""
        Decrements the count by one.
>>> import trypython.advanced.multiprocesslatch as mp_latch
>>> latch = mp_latch.CountDownLatch(5)
>>> latch.count
5
>>> latch.count_down()
>>> latch.count
4
>>> latch.count_down()
>>> latch.count_down()
>>> latch.count_down()
>>> latch.count
1
>>> latch.count_down()
>>> latch.count
0
>>> latch.count_down()
>>> latch.count
0
"""
with self.lock:
if self._count > 0:
self._count -= 1
if self._count <= 0:
self.lock.notify_all()
def await_(self, timeout: ty.Optional[float] = None) -> bool:
"""
        Blocks until the count reaches 0.
        If timeout is given, the call returns after at most that many seconds.
        A return value of False means the wait timed out.
        :param timeout: Timeout in seconds. Defaults to None, which waits without limit until the count reaches 0.
        :return: False if the wait timed out; True otherwise.
>>> import trypython.advanced.multiprocesslatch as mp_latch
>>> latch = mp_latch.CountDownLatch(5)
>>> latch.count
5
>>> latch.await_(timeout=0.1)
False
>>> latch.count_down()
>>> latch.count
4
>>> latch.await_(timeout=0.1)
False
>>> latch.count_down()
>>> latch.count_down()
>>> latch.count_down()
>>> latch.count
1
>>> latch.await_(timeout=0.1)
False
>>> latch.count_down()
>>> latch.await_()
True
"""
with self.lock:
if self._count > 0:
return self.lock.wait(timeout=timeout)
else:
return True
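
if __name__ == '__main__':
    # Minimal demo sketch: five worker threads count down while the main thread waits.
    latch = CountDownLatch(5)

    def _worker() -> None:
        # Real code would do some work here before signalling completion.
        latch.count_down()

    workers = [th.Thread(target=_worker) for _ in range(5)]
    for w in workers:
        w.start()
    # await_ returns True once every worker has called count_down
    print('all done:', latch.await_(timeout=5.0))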
|
machine_motion.py
|
# File name: MachineMotion.py #
# Version: 4.3 #
# Note: Information about all the g-Code #
# commands supported are available at #
# the following location of the SDK: #
# ./documentation #
# Import standard libraries
import json, time, threading, sys
import traceback
import urllib
if sys.version_info.major < 3:
from httplib import HTTPConnection
from urllib import urlencode
else:
from http.client import HTTPConnection
from urllib.parse import urlencode
# Import package dependent libraries
import paho.mqtt.client as mqtt
import paho.mqtt.subscribe as MQTTsubscribe
class MACHINEMOTION_HW_VERSIONS:
MMv1 = 1
MMv2 = 2
MMv2OneDrive = 3
class DIRECTION:
POSITIVE = "positive"
NEGATIVE = "negative"
NORMAL = POSITIVE
REVERSE = NEGATIVE
CLOCKWISE = POSITIVE
COUNTERCLOCKWISE = NEGATIVE
class AXIS_NUMBER:
DRIVE1 = 1
DRIVE2 = 2
DRIVE3 = 3
DRIVE4 = 4
class UNITS_SPEED:
mm_per_min = "mm per minute"
mm_per_sec = "mm per second"
class UNITS_ACCEL:
mm_per_min_sqr = "mm per minute"
mm_per_sec_sqr = "mm per second"
class DEFAULT_IP_ADDRESS:
usb_windows = "192.168.7.2"
usb_mac_linux = "192.168.7.2"
ethernet = "192.168.0.2"
localhost = "127.0.0.1"
DEFAULT_IP = DEFAULT_IP_ADDRESS.usb_windows
class MICRO_STEPS:
ustep_full = 1
ustep_2 = 2
ustep_4 = 4
ustep_8 = 8
ustep_16 = 16
ustep_32 = 32
class MECH_GAIN:
timing_belt_150mm_turn = 150
legacy_timing_belt_200_mm_turn = 200
enclosed_timing_belt_mm_turn = 208
ballscrew_10mm_turn = 10
enclosed_ballscrew_16mm_turn = 16
legacy_ballscrew_5_mm_turn = 5
indexer_deg_turn = 85
indexer_v2_deg_turn = 36
roller_conveyor_mm_turn = 157.7
belt_conveyor_mm_turn = 73.563
rack_pinion_mm_turn = 157.08
rack_pinion_v2_mm_turn = 141.37
electric_cylinder_mm_turn = 6
belt_rack_mm_turn = 125
enclosed_lead_screw_mm_turn = 4
hd_roller_conveyor_mm_turn = 123.3
class STEPPER_MOTOR:
steps_per_turn = 200
class AUX_PORTS:
aux_1 = 0
aux_2 = 1
aux_3 = 2
aux_4 = 3
class ENCODER_TYPE:
real_time = "realtime-position"
stable = "stable-position"
class BRAKE_STATES:
locked = "locked"
unlocked = "unlocked"
unknown = "unknown"
class TUNING_PROFILES:
DEFAULT = "default"
CONVEYOR_TURNTABLE = "conveyor_turntable"
class CONTROL_LOOPS:
OPEN_LOOP = "open"
CLOSED_LOOP = "closed"
class POWER_SWITCH:
ON = "on"
OFF = "off"
class PUSH_BUTTON:
class COLOR:
BLACK = 0
WHITE = 1
class STATE:
PUSHED = "pushed"
RELEASED = "released"
class MOTOR_SIZE:
SMALL = "Small Servo"
MEDIUM = "Medium Servo"
LARGE = "Large Servo"
HARDWARE_MIN_HOMING_FEEDRATE = 500
HARDWARE_MAX_HOMING_FEEDRATE = 8000
MIN_MOTOR_CURRENT = 1.5 # Amps
MAX_MOTOR_CURRENT = 10.0 # Amps
class MQTT :
class PATH :
ESTOP = "estop"
ESTOP_STATUS = ESTOP + "/status"
ESTOP_TRIGGER_REQUEST = ESTOP + "/trigger/request"
ESTOP_TRIGGER_RESPONSE = ESTOP + "/trigger/response"
ESTOP_RELEASE_REQUEST = ESTOP + "/release/request"
ESTOP_RELEASE_RESPONSE = ESTOP + "/release/response"
ESTOP_SYSTEMRESET_REQUEST = ESTOP + "/systemreset/request"
ESTOP_SYSTEMRESET_RESPONSE = ESTOP + "/systemreset/response"
AUX_PORT_POWER = "aux_power"
AUX_PORT_SAFETY = "aux_safety_power"
SMARTDRIVES_READY = "smartDrives/areReady"
    TIMEOUT = 10.0 # Number of seconds to wait for an MQTT response
DEFAULT_TIMEOUT = 65
def stderr(*args):
# print(*args, file=sys.stderr, flush=True) only works in python3
sys.stderr.write(" ".join(map(lambda x : str(x), list(args))) + "\n")
sys.stderr.flush()
def HTTPSend(host, path, data=None, JsonResponse=False, JsonRequest=False, timeout=DEFAULT_TIMEOUT):
timeout = DEFAULT_TIMEOUT if timeout <= 0 else timeout
timeouts = [
DEFAULT_TIMEOUT for i in range(int(timeout/DEFAULT_TIMEOUT))
]
if timeout % DEFAULT_TIMEOUT > 0:
timeouts.append(timeout % DEFAULT_TIMEOUT)
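    # e.g. timeout=150 with DEFAULT_TIMEOUT=65 yields chunks [65, 65, 20]:
    # one connection attempt is made per chunk until an attempt succeeds.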
    for i, current_timeout in enumerate(timeouts):
        lConn = None
        try:
            lConn = HTTPConnection(host, timeout=current_timeout)
            if data is None:
                lConn.request("GET", path)
            else:
                contentType = "application/json" if JsonRequest else "application/octet-stream"
                headers = {"Content-type": contentType}
                lConn.request("POST", path, data, headers)
lResponse = lConn.getresponse()
status = lResponse.status
lResponse = lResponse.read()
if status != 200:
raise Exception("request http://%s%s failed with status %d: %s"
% (host, path, status, str(lResponse)))
if not JsonResponse:
# Casting as a string is necessary for python3
return str(lResponse)
return lResponse
except Exception as e:
stderr("ERROR - Could not GET %s: %s (%s)" % (path, traceback.format_exc(), e))
if lConn:
lConn.close()
lConn = None
if i + 1 == len(timeouts):
raise e
#
# Class that handles all gCode related communications
# @status
#
class GCode:
'''skip
'''
#
# Class constructor
# PRIVATE
    # @param ip --- Description: The GCode class communicates with the controller over HTTP. The controller's IP address is passed at construction time.
# @status
#
def __init__(self, ip, isMMv2=False, isMMv2OneDrive=False):
# Passing in the socket instance at construction
self.myIp = ip
self.libPort = ":8000"
self.isMMv2 = isMMv2
self.isMMv2OneDrive = isMMv2OneDrive
return
#
# Function to map API axis labels to motion controller axis labels
# PRIVATE
# @param axis --- Description: The API axis label.
# @status
#
def __getTrueAxis__(self, axis):
if self.isMMv2OneDrive:
strs = "X"
elif self.isMMv2:
strs = "XYZW"
else:
strs = "XYZ"
if axis < 1 or axis > len(strs):
rng = ", ".join([str(i + 1) for i in range(len(strs))])
raise Exception("Invalid axis index %d ! (must be in range %s)" % (axis, rng))
return strs[axis - 1]
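    # e.g. on MMv2, __getTrueAxis__(2) returns "Y"; on MMv2OneDrive only
    # axis 1 ("X") is valid and any other index raises.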
#
# Function that packages the data in a JSON object and sends to the MachineMotion server over a socket connection.
# PRIVATE
#
def __send__(self, cmd, data=None, JsonResponse=False, JsonRequest=False, timeout=DEFAULT_TIMEOUT):
return HTTPSend(self.myIp + self.libPort, cmd, data, JsonResponse, JsonRequest, timeout)
#
# Function to send a raw G-Code ASCII command
# @param gCode --- Description: gCode is string representing the G-Code command to send to the controller. Type: string.
# @status
#
def __emit__(self, gCode, timeout=DEFAULT_TIMEOUT):
try:
path = "/gcode?%s" % urlencode({"gcode": "%s" % gCode})
rep = self.__send__(path, timeout=timeout)
# Call user callback only if relevant
if self.__userCallback__ is not None:
self.__userCallback__(rep)
return rep
except Exception as e:
raise e
def __emitEchoOk__(self, gCode, timeout=DEFAULT_TIMEOUT):
reply = self.__emit__(gCode, timeout=timeout)
if ("echo" in reply and "ok" in reply):
return reply
else:
raise Exception('Error in gCode execution: %s' % str(reply))
#
# Function to send commands specifically to the smartDrives
# @status
#
def __sendToSmartDrives__(self, url, payload=None, JsonResponse=False, JsonRequest=False) :
rep = self.__send__(url, payload, JsonResponse, JsonRequest)
# Call user callback only if relevant
if self.__userCallback__ is None : pass
else :
self.__userCallback__(str(rep)) # Casting as a string is necessary for python3
return rep
#
# Function to send the config to the smartDrives
# @param config --- Description: config is an object representing the config payload to send to the controller. Type: object.
# @status
#
def __sendConfigToSmartDrives__(self, drive, payload, JsonResponse=False) :
url = "/smartDrives/configuration?%s" % urlencode({"drive": "%s" % drive})
return self.__sendToSmartDrives__(url, payload, JsonResponse, JsonRequest=True)
#
# Function to ask the configuration to the smartDrives
# @status
#
def __askConfigToSmartDrives__(self, drive) :
return self.__sendConfigToSmartDrives__(drive, None, JsonResponse=True)
#
# Function to ask the position to the smartDrives
# @status
#
def __askPositionToSmartDrives__(self) :
# Expecting a json encoded response
return self.__sendToSmartDrives__("/smartDrives/position", JsonResponse=True)
@staticmethod
def __userCallback__(data): return
def __keepSocketAlive__(self) : pass
# Private function
def __setUserCallback__(self, userCallback) :
# Save the user function to call on incoming messages locally
self.__userCallback__ = userCallback
# Start the periodic process that fetches the sockets that were received by the OS
self.__keepSocketAlive__()
return
#
# Class used to encapsulate the MachineMotion controller
# @status
#
class MachineMotion(object):
'''z-index:100
'''
# Version independent MQTT parser
def __parseMessage(self, message, jsonLoads=True):
# Decode the payload according to the Python version
if sys.version_info.major < 3:
payload = message
else:
payload = message.decode('utf-8')
# Json decode the payload or not
if jsonLoads:
try:
return json.loads(payload)
except Exception as e:
stderr("WARNING - Invalid JSON payload: " + e)
return None
return payload
# Version independent numerical checker
def _isNumber(self, number):
        # Python 2 has two integer types - int and long. There is no 'long integer' in Python 3 anymore: integers in Python 3 are of unlimited size.
if sys.version_info.major < 3:
return isinstance(number, (int, long, float))
else:
return isinstance(number, (int, float))
# Class constructor
def __init__(self, machineIp=DEFAULT_IP_ADDRESS.usb_windows, gCodeCallback=(lambda *args: None), machineMotionHwVersion=MACHINEMOTION_HW_VERSIONS.MMv1) :
'''
desc: Constructor of MachineMotion class
params:
machineIp:
desc: IP address of the Machine Motion
type: string of the DEFAULT_IP_ADDRESS class, or other valid IP address
default: DEFAULT_IP_ADDRESS.usb_windows
gCodeCallback:
                desc: Allows defining custom behaviour for the MachineMotion object
when a response is received from the motion controller.
type: function
default: None
machineMotionHwVersion:
desc: The hardware version of the MachineMotion being used
type: string of the MACHINEMOTION_HW_VERSIONS class
default: MACHINEMOTION_HW_VERSIONS.MMv1
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveRelative.py, oneDriveControl.py
'''
self.IP = machineIp
self.machineMotionHwVersion = machineMotionHwVersion
self.isMMv2 = self.machineMotionHwVersion >= MACHINEMOTION_HW_VERSIONS.MMv2
self.isMMv2OneDrive = self.machineMotionHwVersion == MACHINEMOTION_HW_VERSIONS.MMv2OneDrive
self.myConfiguration = {"machineIp": self.IP, "machineGateway": "notInitialized", "machineNetmask": "notInitialized"}
self.myGCode = GCode(self.IP, self.isMMv2, self.isMMv2OneDrive)
self.myGCode.__setUserCallback__(gCodeCallback)
self.maxIo = 8 if self.isMMv2 else 3
self.myIoExpanderAvailabilityState = [ False ] * self.maxIo
self.myEncoderRealtimePositions = [ 0, 0, 0 ] # MMv1 only
self.myEncoderStablePositions = [ 0, 0, 0 ] # MMv1 only
self.digitalInputs = {}
self.pushButtonStates = {}
dev_count = 4 if self.isMMv2 else 3
self.brakeStatus_control = [None for i in range(dev_count)] # MMv2 does not support control power
self.brakeStatus_safety = [None for i in range(dev_count)]
self.estopStatus = None
self.areSmartDrivesReady = None
# MQTT
self.mqttCallbacks = [] # Custom MachineApp template variable
self.myMqttClient = None
self.myMqttClient = mqtt.Client()
self.myMqttClient.on_connect = self.__onConnect
self.myMqttClient.on_message = self.__onMessage
self.myMqttClient.on_disconnect = self.__onDisconnect
self.myMqttClient.connect(machineIp)
self.myMqttClient.loop_start()
        # Set callback to default until the user initializes it
self.eStopCallback = (lambda *args: None)
# Initialize all of the possible push button callbacks
        # The callbacks are stored in a dict of dicts of functions. The outer dict is keyed by
        # the possible push button addresses, the inner dict by the buttons
        # available per module.
# The callback associated with pressing the black button of module 1 can be accessed as follows:
# address = 1
# function = self.pushButtonCallbacks[str(address)][str(PUSH_BUTTON.COLOR.BLACK)]
self.pushButtonCallbacks = {}
for address in range(1,self.maxIo+1):
self.pushButtonCallbacks.update({str(address):{}})
validParams = [i for i in PUSH_BUTTON.COLOR.__dict__.keys() if i[:1] != '_']
validValues = [str(PUSH_BUTTON.COLOR.__dict__[i]) for i in validParams]
for button in validValues:
self.pushButtonCallbacks[str(address)].update({button: (lambda *args: None)})
# Initializing axis parameters
self.steps_mm = ["Axis 0 does not exist", "notInitialized", "notInitialized", "notInitialized", "notInitialized"]
self.u_step = ["Axis 0 does not exist", "notInitialized", "notInitialized", "notInitialized", "notInitialized"]
self.mech_gain = ["Axis 0 does not exist", "notInitialized", "notInitialized", "notInitialized", "notInitialized"]
self.direction = ["Axis 0 does not exist", "notInitialized", "notInitialized", "notInitialized", "notInitialized"]
self.__registeredInputMap = {} # Custom MachineApp template variable
return
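    # Typical construction (sketch; adjust the IP and hardware version to your setup):
    #   mm = MachineMotion(DEFAULT_IP_ADDRESS.usb_windows,
    #                      machineMotionHwVersion=MACHINEMOTION_HW_VERSIONS.MMv2)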
#Takes tuples of parameter variables and the class they belong to.
#If the parameter does not belong to the class, it raises a descriptive error.
def _restrictInputValue(self, argName, argValue, argClass):
validParams = [i for i in argClass.__dict__.keys() if i[:1] != '_']
validValues = [argClass.__dict__[i] for i in validParams]
if argValue in validValues:
return
errorMessage = "An invalid selection was made. Given parameter '" + str(argName) + "' must be one of the following values:"
for param in validParams:
errorMessage += "\n" + argClass.__name__ + "." + param + " (" + str(argClass.__dict__[param]) +")"
raise Exception(errorMessage)
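    # For example, _restrictInputValue("direction", "positive", DIRECTION) passes
    # silently, while an unknown value raises an Exception listing every
    # DIRECTION alias together with its value.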
def moveContinuous(self, axis, speed, accel) :
'''
desc: Starts an axis using speed mode.
params:
axis:
desc: Axis to move
type: Number
speed:
desc: Speed to move the axis at in mm / sec
type: Number
accel:
desc: Acceleration used to reach the desired speed, in mm / sec^2
type: Number
compatibility: MachineMotion v1 and MachineMotion v2, software version 2.3.0 and newer.
exampleCodePath: moveContinuous.py
'''
# Verify argument type to avoid sending garbage in the GCODE
self._restrictAxisValue(axis)
if not self._isNumber(speed) : raise Exception('Error in speed variable type')
if not self._isNumber(accel) : raise Exception('Error in accel variable type')
if self.isMMv2:
gCode = "V7 S" + str(speed) + " A" + str(abs(accel)) + " " + self.myGCode.__getTrueAxis__(axis)
else:
# Check if steps_per_mm are defined locally. If not, query them.
if not self._isNumber(self.steps_mm[axis]) :
self.populateStepsPerMm()
# Send speed command with accel
gCode = "V4 S" + str(speed * self.steps_mm[axis]) + " A" + str(abs(accel * self.steps_mm[axis])) + " " + self.myGCode.__getTrueAxis__(axis)
self.myGCode.__emitEchoOk__(gCode)
return
def stopMoveContinuous(self, axis, accel) :
'''
desc: Stops an axis using speed mode.
params:
axis:
desc: Axis to move
type: Number
accel:
desc: Acceleration used to reach a null speed, in mm / sec^2
type: Number
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveContinuous.py
'''
return self.moveContinuous(axis, 0, accel)
# ------------------------------------------------------------------------
# Determines if the given id is valid for a drive.
def _restrictAxisValue(self, axis) :
# MMv1 has drives 1,2,3
# MMv2 has drives 1,2,3,4
# MMv2OneDrive has drive 1
self.myGCode.__getTrueAxis__(axis)
return True
# ------------------------------------------------------------------------
# Determines if the given id is valid for a Brake.
# @param {int} port - Port identifier
def _restrictBrakePort(self, port):
# Brakes are connected to AUX port on MMv1
# Brakes are connected to JunctionBox brake port on MMv2
if self.isMMv2OneDrive:
maxId = 1
elif self.isMMv2:
maxId = 4
else:
maxId = 3
if (port < 1 or port > maxId):
rng = ", ".join([str(i + 1) for i in range(maxId)])
raise Exception("Invalid brake port %d ! (must be in range %s)" % (port, rng))
return True
# ------------------------------------------------------------------------
    # Determines if the given id is valid for an IO Expander.
# @param {int} id - Device identifier
def isIoExpanderIdValid(self, id):
# IO-Expander IDs range between 1 and maxIo.
if id < 1 or id > self.maxIo:
rng = ", ".join([str(i) for i in range(self.maxIo)])
raise Exception("Invalid Digital IO Module device ID %d (must be in range %s)" % (id, rng))
return True
# ------------------------------------------------------------------------
    # Determines if the given input pin identifier is valid for an IO Expander.
# @param {int} deviceId - Device identifier
# @param {int} pinId - Pin identifier
def isIoExpanderInputIdValid(self, deviceId, pinId):
self.isIoExpanderIdValid(deviceId)
# IO-Expander pins range between 0 and 3.
if (pinId < 0 or pinId > 3):
rng = ", ".join([str(i) for i in range(4)])
raise Exception("Invalid Digital IO Module pin id %d (must be in range %s)" % (pinId, rng))
return True
# ------------------------------------------------------------------------
    # Determines if the given output pin identifier is valid for an IO Expander.
# @param {int} deviceId - Device identifier
# @param {int} pinId - Pin identifier
def isIoExpanderOutputIdValid(self, deviceId, pinId) :
return self.isIoExpanderInputIdValid(deviceId, pinId)
# ------------------------------------------------------------------------
# Determines if the given push button address is valid for a given push button module.
# @param {int} deviceId - Device identifier
# @param {str} buttonId - Button identifier
def isPushButtonInputIdValid(self, deviceId, buttonId):
self.isIoExpanderIdValid(deviceId)
self._restrictInputValue("buttonId", buttonId, PUSH_BUTTON.COLOR)
return True
# ------------------------------------------------------------------------
# Determines if the given id is valid for an encoder.
def isEncoderIdValid(self, id) :
# For MMv1 only, encoder IDs range 0, 1, 2.
if id < 0 or id > 2:
rng = ", ".join([str(i) for i in range(3)])
raise Exception("invalid encoder id %d (must be in range %s)" % (id, rng))
return True
def populateStepsPerMm(self,onlyMarlin=False):
if self.isMMv2:
raise Exception('function populateStepsPerMm is not supported by MachineMotion v2.')
# For axes 1,2,3 ask directly from Marlin
reply_M503 = self.myGCode.__emitEchoOk__("M503")
beginning = reply_M503.find('M92')
self.steps_mm[1] = float(reply_M503[reply_M503.find('X',beginning)+1:(reply_M503.find('Y',beginning)-1)])
self.steps_mm[2] = float(reply_M503[reply_M503.find('Y',beginning)+1:(reply_M503.find('Z',beginning)-1)])
self.steps_mm[3] = float(reply_M503[reply_M503.find('Z',beginning)+1:(reply_M503.find('E',beginning)-1)])
self.direction[1] = DIRECTION.NORMAL if self.steps_mm[1]>0 else DIRECTION.REVERSE
self.direction[2] = DIRECTION.NORMAL if self.steps_mm[2]>0 else DIRECTION.REVERSE
self.direction[3] = DIRECTION.NORMAL if self.steps_mm[3]>0 else DIRECTION.REVERSE
return
def deduce_steps_per_mm(self, mech_gain, u_step, direction) :
steps_per_mm = abs(float(STEPPER_MOTOR.steps_per_turn) * float(u_step) / float(mech_gain))
return -steps_per_mm if direction == DIRECTION.REVERSE else steps_per_mm
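    # Worked example: 8 microsteps on a 150 mm/turn timing belt gives
    # 200 * 8 / 150 = ~10.667 steps per mm (negated when direction is REVERSE).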
def getDesiredPositions(self, axis=None):
'''
desc: Returns the desired position of the axes.
params:
axis (optional):
desc: The axis to get the desired position of.
type: Number
returnValue: The position of the axis if that parameter was specified, or a dictionary containing the desired position of every axis.
returnValueType: Number or Dictionary of numbers
note: This function returns the 'open loop' position of each axis.
compatibility: Recommended for MachineMotion v1.
exampleCodePath: getPositions.py
'''
desiredPositions = self.getCurrentPositions()
if isinstance(axis, int) : # Axis is a single number, return a number
self._restrictAxisValue(axis)
if axis == 4:
raise Exception("The desired position of the 4th axis is not supported.")
return desiredPositions[axis]
else : # Return the whole dictionary
return desiredPositions
def getCurrentPositions(self):
# Note : Deprecated this function, as it does not do what its name suggests...
# It returns the desired position, and not the current one.
reply = self.myGCode.__emitEchoOk__("M114")
positions = {
1 : float(reply[reply.find('X')+2:(reply.find('Y')-1)]),
2 : float(reply[reply.find('Y')+2:(reply.find('Z')-1)]),
3 : float(reply[reply.find('Z')+2:(reply.find('E')-1)])
}
# Note : The desired position of the 4th drive is unobtainable from the smartDrives.
return positions
def getActualPositions(self, axis=None):
'''
desc: Returns the current position of the axes.
params:
axis (optional):
desc: The axis to get the current position of.
type: Number
returnValue: The position of the axis if that parameter was specified, or a dictionary containing the current position of every axis.
returnValueType: Number or Dictionary of numbers
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: getPositions.py
'''
axes = [1, 2, 3]
if axis != None : # Restrict argument if we were given some
self._restrictAxisValue(axis)
axes = [axis]
# If MM v1, use M114, and read step counts.
if not self.isMMv2 :
# Find out step counts
reply_M114 = self.myGCode.__emitEchoOk__("M114")
beginning = reply_M114.find('Count')
step_count = {
1 : int(reply_M114[reply_M114.find('X',beginning)+3:(reply_M114.find('Y',beginning)-1)]),
2 : int(reply_M114[reply_M114.find('Y',beginning)+2:(reply_M114.find('Z',beginning)-1)]),
3 : int(reply_M114[reply_M114.find('Z',beginning)+2:(reply_M114.find(' ', reply_M114.find('Z',beginning)+2 ))])
}
# Find out step per mm and then calculate position
positions = {}
for drive in axes :
# If the steps_per_mm are not defined locally, retrieve them from Marlin
if not self._isNumber(self.steps_mm[drive]) :
self.populateStepsPerMm()
# Deduce position
positions[drive] = int( step_count[drive] / self.steps_mm[drive] )
# If MM v2, use smartDrive server route
else :
reply = self.myGCode.__askPositionToSmartDrives__()
if ( "Error" in str(reply) ) : # str() encoding is necessary for Python3
raise Exception('Error in gCode execution')
else:
parsedReply = self.__parseMessage(reply)
if not self.isMMv2OneDrive :
positions = {
1 : parsedReply['X'],
2 : parsedReply['Y'],
3 : parsedReply['Z'],
4 : parsedReply['W']
}
else:
positions = {1 : parsedReply['X']}
if isinstance(axis, int) : # Axis is a single number, return a number
return positions[axis]
else : # Return the whole dictionary
return positions
def getEndStopState(self):
'''
desc: Returns the current state of all home and end sensors.
        returnValue: The states of all end stop sensors {x_min, x_max, y_min, y_max, z_min, z_max}, each either "TRIGGERED" or "open"
returnValueType: Dictionary
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: getEndStopState.py
'''
if self.isMMv2OneDrive:
states = {
'x_min' : None,
'x_max' : None
}
else:
states = {
'x_min' : None,
'x_max' : None,
'y_min' : None,
'y_max' : None,
'z_min' : None,
'z_max' : None,
'w_min' : None,
'w_max' : None
}
def trimUntil(S, key) :
return S[S.find(key) + len(key) :]
reply = self.myGCode.__emitEchoOk__("M119")
#If Python 2.7
if sys.version_info.major < 3:
keyE = "\n"
#If Python 3
else:
keyE = "\\n"
#Remove first line (echo line)
reply = trimUntil(reply, keyE)
if "x_min" in reply :
keyB = "x_min: "
states['x_min'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove x_min line
reply = trimUntil(reply, keyE)
else : raise Exception('Error in gCode')
if "x_max" in reply :
keyB = "x_max: "
states['x_max'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove x_max line
reply = trimUntil(reply, keyE)
else : raise Exception('Error in gCode')
if not self.isMMv2OneDrive:
if "y_min" in reply :
keyB = "y_min: "
states['y_min'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove y_min line
reply = trimUntil(reply, keyE)
else : raise Exception('Error in gCode')
if "y_max" in reply :
keyB = "y_max: "
states['y_max'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove y_max line
reply = trimUntil(reply, keyE)
else : raise Exception('Error in gCode')
if "z_min" in reply :
keyB = "z_min: "
states['z_min'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove z_min line
reply = trimUntil(reply, keyE)
else : raise Exception('Error in gCode')
if "z_max" in reply :
keyB = "z_max: "
states['z_max'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove z_max line
reply = trimUntil(reply, keyE)
else : raise Exception('Error in gCode')
if "w_min" in reply :
keyB = "w_min: "
states['w_min'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove w_min line
reply = trimUntil(reply, keyE)
elif self.isMMv2 : raise Exception('Error in gCode')
if "w_max" in reply :
keyB = "w_max: "
states['w_max'] = reply[(reply.find(keyB) + len(keyB)) : (reply.find(keyE))]
#Remove w_max line
reply = trimUntil(reply, keyE)
elif self.isMMv2 : raise Exception('Error in gCode')
return states
def stopAllMotion(self):
'''
desc: Immediately stops all motion of all axes.
note: This function is a hard stop. It is not a controlled stop and consequently does not decelerate smoothly to a stop. Additionally, this function is not intended to serve as an emergency stop since this stop mechanism does not have safety ratings.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: stopAllMotion.py
'''
return self.myGCode.__emitEchoOk__("M410")
def moveToHomeAll(self):
'''
desc: Initiates the homing sequence of all axes. All axes will move home sequentially.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveToHomeAll.py
'''
try:
return self.myGCode.__emitEchoOk__("G28", timeout=DEFAULT_TIMEOUT * 5)
except Exception as e:
self.stopAllMotion()
raise e
def moveToHome(self, axis):
'''
desc: Initiates the homing sequence for the specified axis.
params:
axis:
desc: The axis to be homed.
type: Number
note: If configAxisDirection is set to "normal" on axis 1, axis 1 will home itself towards sensor 1A. If configAxisDirection is set to "reverse" on axis 1, axis 1 will home itself towards sensor 1B.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveToHome.py
'''
self._restrictAxisValue(axis)
gCode = "G28 " + self.myGCode.__getTrueAxis__(axis)
try:
return self.myGCode.__emitEchoOk__(gCode, timeout=DEFAULT_TIMEOUT * 5)
except Exception as e:
self.stopAllMotion()
raise e
def setSpeed(self, speed, units = UNITS_SPEED.mm_per_sec):
'''
desc: Sets the global speed for all movement commands on all axes.
params:
speed:
desc: The global max speed in mm/sec, or mm/min according to the units parameter.
type: Number
units:
desc: Units for speed. Can be switched to UNITS_SPEED.mm_per_min
defaultValue: UNITS_SPEED.mm_per_sec
type: String
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: setSpeed.py
'''
self._restrictInputValue("units", units, UNITS_SPEED)
if units == UNITS_SPEED.mm_per_min:
speed_mm_per_min = speed
elif units == UNITS_SPEED.mm_per_sec:
speed_mm_per_min = 60*speed
self.myGCode.__emitEchoOk__("G0 F" +str(speed_mm_per_min))
return
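    # e.g. setSpeed(120) converts 120 mm/sec to 7200 mm/min and emits "G0 F7200".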
def setAcceleration(self, acceleration, units=UNITS_ACCEL.mm_per_sec_sqr):
'''
desc: Sets the global acceleration for all movement commands on all axes.
params:
            acceleration:
desc: The global acceleration in mm/s^2.
type: Number
units:
desc: Units for speed. Can be switched to UNITS_ACCEL.mm_per_min_sqr
defaultValue: UNITS_ACCEL.mm_per_sec_sqr
type: String
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: setAcceleration.py
'''
self._restrictInputValue("units", units, UNITS_ACCEL)
if units == UNITS_ACCEL.mm_per_sec_sqr:
accel_mm_per_sec_sqr = acceleration
elif units == UNITS_ACCEL.mm_per_min_sqr:
accel_mm_per_sec_sqr = acceleration/3600
# Note : Set travel and print acceleration, to impact G0 and G1 commands.
self.myGCode.__emitEchoOk__("M204 T" + str(accel_mm_per_sec_sqr) + " P" + str(accel_mm_per_sec_sqr))
return
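    # e.g. setAcceleration(3600, UNITS_ACCEL.mm_per_min_sqr) converts
    # 3600 mm/min^2 to 1.0 mm/sec^2 before emitting the M204 command.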
def moveToPosition(self, axis, position):
'''
desc: Moves the specified axis to a desired end location.
params:
axis:
desc: The axis which will perform the absolute move command.
type: Number
position:
desc: The desired end position of the axis movement.
type: Number
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveToPosition.py
'''
self._restrictAxisValue(axis)
# Set to absolute motion mode
self.myGCode.__emitEchoOk__("G90")
# Transmit move command
self.myGCode.__emitEchoOk__("G0 " + self.myGCode.__getTrueAxis__(axis) + str(position))
return
def moveToPositionCombined(self, axes, positions):
'''
desc: Moves multiple specified axes to their desired end locations synchronously.
params:
axes:
desc: The axes which will perform the move commands. Ex - [1 ,3]
type: List
positions:
                desc: The desired end positions of the axes' movements. Ex - [50, 10]
type: List
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveToPositionCombined.py
note: The current speed and acceleration settings are applied to the combined motion of the axes.
'''
if (not isinstance(axes, list) or not isinstance(positions, list)):
raise TypeError("Axes and Positions must be lists")
for axis in axes:
self._restrictAxisValue(axis)
# Set to absolute motion mode
self.myGCode.__emitEchoOk__("G90")
# Transmit move command
command = "G0"
for axis, position in zip(axes, positions):
command += " " + self.myGCode.__getTrueAxis__(axis) + str(position)
self.myGCode.__emitEchoOk__(command)
return
def moveRelative(self, axis, distance):
'''
desc: Moves the specified axis the specified distance.
params:
axis:
desc: The axis to move.
type: Integer
distance:
desc: The travel distance in mm.
type: Number
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveRelative.py
'''
self._restrictAxisValue(axis)
# Set to relative motion mode
self.myGCode.__emitEchoOk__("G91")
# Transmit move command
self.myGCode.__emitEchoOk__("G0 " + self.myGCode.__getTrueAxis__(axis) + str(distance))
return
def moveRelativeCombined(self, axes, distances):
'''
desc: Moves the multiple specified axes the specified distances.
params:
axes:
desc: The axes to move. Ex-[1,3]
type: List of Integers
distances:
desc: The travel distances in mm. Ex - [10, 40]
type: List of Numbers
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: moveRelativeCombined.py
note: The current speed and acceleration settings are applied to the combined motion of the axes.
'''
if (not isinstance(axes, list) or not isinstance(distances, list)):
raise TypeError("Axes and Distances must be lists")
for axis in axes:
self._restrictAxisValue(axis)
# Set to relative motion mode
self.myGCode.__emitEchoOk__("G91")
# Transmit move command
command = "G0"
for axis, distance in zip(axes, distances):
command += " " + self.myGCode.__getTrueAxis__(axis) + str(distance)
self.myGCode.__emitEchoOk__(command)
return
def setPosition(self, axis, position):
'''
desc: Override the current position of the specified axis to a new value.
params:
axis:
desc: Overrides the position on this axis.
type: Number
position:
desc: The new position value in mm.
type: Number
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: setPosition.py
'''
self._restrictAxisValue(axis)
# Transmit move command
self.myGCode.__emitEchoOk__("G92 " + self.myGCode.__getTrueAxis__(axis) + str(position))
return
def emitgCode(self, gCode):
'''
desc: Executes raw gCode on the controller.
params:
gCode:
desc: The g-code that will be passed directly to the controller.
type: String
note: All movement commands sent to the controller are by default in mm.
compatibility: Recommended for MachineMotion v1.
exampleCodePath: emitgCode.py
'''
return self.myGCode.__emit__(gCode)
def isMotionCompleted(self):
'''
desc: Indicates if the last move command has completed.
returnValue: Returns false if the machine is currently executing a movement command.
returnValueType: Boolean
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: getPositions.py
note: isMotionCompleted does not account for on-going continuous moves.
'''
        # Send the V0 gCode command to query motion status
reply = self.myGCode.__emitEchoOk__("V0")
return ("COMPLETED" in reply)
def waitForMotionCompletion(self):
'''
desc: Pauses python program execution until machine has finished its current movement.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: waitForMotionCompletion.py
note: waitForMotionCompletion does not account for on-going continuous moves.
'''
delay = 0.5
if(self.isMMv2):
delay = 0.1 # Shorter delay is possible on MMv2 because of the BeagleBone AI
while not self.isMotionCompleted() :
time.sleep(delay)
return
def configHomingSpeed(self, axes, speeds, units = UNITS_SPEED.mm_per_sec):
'''
desc: Sets homing speed for all selected axes.
params:
axes:
desc: A list of the axes to configure. ex - [1,2,3]
type: List of Numbers
speeds:
desc: A list of homing speeds to set for each axis. ex - [50, 50, 100]
type: List of Numbers
units:
desc: Units for speed. Can be switched to UNITS_SPEED.mm_per_min
defaultValue: UNITS_SPEED.mm_per_sec
type: String
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: configHomingSpeed.py
note: Once set, the homing speed will apply to all programs, including MachineLogic applications.
'''
try:
axes = list(axes)
speeds = list(speeds)
except TypeError:
axes = [axes]
speeds = [speeds]
if len(axes) != len(speeds):
raise Exception("Axes and speeds must be of same length")
for axis in axes:
self._restrictAxisValue(axis)
gCodeCommand = "V2"
for idx, axis in enumerate(axes):
if units == UNITS_SPEED.mm_per_sec:
speed_mm_per_min = speeds[idx] * 60
elif units == UNITS_SPEED.mm_per_min:
speed_mm_per_min = speeds[idx]
if speed_mm_per_min < HARDWARE_MIN_HOMING_FEEDRATE:
raise Exception("Your desired homing speed of " + str(speed_mm_per_min) + "mm/min can not be less than " + str(HARDWARE_MIN_HOMING_FEEDRATE) + "mm/min (" + str(HARDWARE_MIN_HOMING_FEEDRATE/60) + "mm/sec).")
if speed_mm_per_min > HARDWARE_MAX_HOMING_FEEDRATE:
raise Exception("Your desired homing speed of " + str(speed_mm_per_min) + "mm/min can not be greater than " + str(HARDWARE_MAX_HOMING_FEEDRATE) + "mm/min (" + str(HARDWARE_MAX_HOMING_FEEDRATE/60) + "mm/sec)")
gCodeCommand = gCodeCommand + " " + self.myGCode.__getTrueAxis__(axis) + str(speed_mm_per_min)
self.myGCode.__emitEchoOk__(gCodeCommand)
return
def configAxis(self, axis, uStep, mechGain) :
'''
desc: Configures motion parameters for a single axis on the MachineMotion v1.
params:
axis:
desc: The axis to configure.
type: Number
uStep:
desc: The microstep setting of the axis.
type: Number
mechGain:
desc: The distance moved by the actuator for every full rotation of the stepper motor, in mm/revolution.
type: Number
note: The uStep setting is hardcoded into the machinemotion controller through a DIP switch and is by default set to 8. The value here must match the value on the DIP Switch.
compatibility: MachineMotion v1 only.
exampleCodePath: configAxis.py
'''
if self.isMMv2 :
raise Exception("The function configAxis is not supported on MachineMotion v2.")
self._restrictAxisValue(axis)
self._restrictInputValue("uStep", uStep, MICRO_STEPS)
self.u_step[axis] = int(uStep)
self.mech_gain[axis] = abs(float(mechGain))
self.direction[axis] = DIRECTION.NORMAL if float(mechGain) > 0 else DIRECTION.REVERSE
self.steps_mm[axis] = self.deduce_steps_per_mm(self.mech_gain[axis], self.u_step[axis], self.direction[axis])
self.myGCode.__emitEchoOk__("M92 " + self.myGCode.__getTrueAxis__(axis) + str(self.steps_mm[axis]))
return
def configAxisDirection(self, axis, direction):
'''
desc: Configures a single axis to operate in either clockwise (normal) or counterclockwise (reverse) mode. Refer to the Automation System Diagram for the correct axis setting.
params:
axis:
desc: The specified axis.
type: Number
direction:
desc: A string from the DIRECTION class. Either 'DIRECTION.NORMAL' or 'DIRECTION.REVERSE'. Normal direction means the axis will home towards end stop sensor A and reverse will make the axis home towards end stop B.
type: String
compatibility: MachineMotion v1 only.
exampleCodePath: configAxisDirection.py
'''
if self.isMMv2 :
raise Exception("The function configAxisDirection is not supported on MachineMotion v2.")
self._restrictAxisValue(axis)
self._restrictInputValue("direction", direction, DIRECTION)
self.direction[axis] = direction
# Verify that steps_mm exists
if not self._isNumber(self.steps_mm[axis]):
self.populateStepsPerMm()
if(direction == DIRECTION.NORMAL):
self.myGCode.__emitEchoOk__("M92 " + self.myGCode.__getTrueAxis__(axis) + str(abs(self.steps_mm[axis])))
self.steps_mm[axis] = abs(self.steps_mm[axis])
elif (direction == DIRECTION.REVERSE):
self.myGCode.__emitEchoOk__("M92 " + self.myGCode.__getTrueAxis__(axis) + "-"+ str(abs(self.steps_mm[axis])))
self.steps_mm[axis] = -abs(self.steps_mm[axis])
return
def configStepper(self, drive, mechGain, direction, motorCurrent, microSteps = MICRO_STEPS.ustep_8, motorSize = MOTOR_SIZE.LARGE) :
'''
desc: Configures motion parameters as a stepper motor, for a single drive on the MachineMotion v2.
params:
drive:
desc: The drive to configure.
type: Number of the AXIS_NUMBER class
mechGain:
desc: The distance moved by the actuator for every full rotation of the stepper motor, in mm/revolution.
type: Number of the MECH_GAIN class
direction:
desc: The direction of the axis
type: String of the DIRECTION class
motorCurrent:
desc: The current to power the motor with, in Amps.
type: Number
microSteps:
desc: The microstep setting of the drive.
type: Number from MICRO_STEPS class
motorSize:
desc: The size of the motor(s) connected to the specified drive(s)
type: String from the MOTOR_SIZE class
default: MOTOR_SIZE.LARGE
note: Warning, changing the configuration can de-energize motors and thus cause unintended behaviour on vertical axes.
compatibility: MachineMotion v2 only.
exampleCodePath: configStepperServo.py
'''
if not self.isMMv2 :
raise Exception("The function configStepper is not supported on MachineMotion v1.")
if isinstance(drive, list):
raise Exception("The drive should be a Number and not a List")
loop = CONTROL_LOOPS.OPEN_LOOP
tuningProfile = TUNING_PROFILES.DEFAULT
self.configAxis_v2(drive, mechGain, direction, motorCurrent, loop, microSteps, tuningProfile,_motorSize=motorSize)
def configServo(self, drives, mechGain, directions, motorCurrent, tuningProfile = TUNING_PROFILES.DEFAULT, parentDrive=None,motorSize=MOTOR_SIZE.LARGE):
'''
desc: Configures motion parameters as a servo motor, for a single drive on the MachineMotion v2.
params:
drives:
desc: The drive or list of drives to configure.
type: Number or list of numbers of the AXIS_NUMBER class
mechGain:
desc: The distance moved by the actuator for every full rotation of the stepper motor, in mm/revolution.
type: Number of the MECH_GAIN class
directions:
desc: The direction or list of directions of each configured axis
type: String or list of strings of the DIRECTION class. Must have the same length as `drives`
motorCurrent:
desc: The current to power the motor with, in Amps.
type: Number
tuningProfile:
desc: The tuning profile of the smartDrive. Determines the characteristics of the servo motor's PID controller
type: String of the TUNING_PROFILES class
default: TUNING_PROFILES.DEFAULT
parentDrive:
desc: The parent drive of the multi-drive axis. The axis' home and end sensors must be connected to this drive.
type: Number
default: None
motorSize:
desc: The size of the motor(s) connected to the specified drive(s)
type: String from the MOTOR_SIZE class
default: MOTOR_SIZE.LARGE
note: Warning, changing the configuration can de-energize motors and thus cause unintended behaviour on vertical axes.
compatibility: MachineMotion v2 only.
exampleCodePath: configStepperServo.py, configMultiDriveServo.py
'''
if not self.isMMv2 :
raise Exception("The function configServo is not supported on MachineMotion v1.")
loop = CONTROL_LOOPS.CLOSED_LOOP
        # Set the microsteps based on the desired gain; this will allow for higher speeds
if 0.0 <= mechGain < 75.0:
microSteps = MICRO_STEPS.ustep_full
elif 75.0 <= mechGain < 150.7:
microSteps = MICRO_STEPS.ustep_2
elif 150.7 <= mechGain:
microSteps = MICRO_STEPS.ustep_4
else:
raise Exception('Mechanical gain should be a positive value.')
self.configAxis_v2(drives, mechGain, directions, motorCurrent, loop, microSteps, tuningProfile, _parent = parentDrive, _motorSize = motorSize)
def configAxis_v2(self, drives, mechGain, directions, motorCurrent, loop, microSteps, tuningProfile, _parent=None, _motorSize = MOTOR_SIZE.LARGE):
if motorCurrent > MAX_MOTOR_CURRENT:
print("Motor current value was clipped to the maximum (" + str(MAX_MOTOR_CURRENT) + "A).")
motorCurrent = MAX_MOTOR_CURRENT
elif motorCurrent < MIN_MOTOR_CURRENT:
print("Motor current value was clipped to the minimum (" + str(MIN_MOTOR_CURRENT) + "A).")
motorCurrent = MIN_MOTOR_CURRENT
self._restrictInputValue("control loop type", loop, CONTROL_LOOPS)
self._restrictInputValue("microSteps", microSteps, MICRO_STEPS)
self._restrictInputValue("tuning profile", tuningProfile, TUNING_PROFILES)
self._restrictInputValue("motor Size", _motorSize, MOTOR_SIZE)
if _parent!=None:
self._restrictInputValue("parentDrive", _parent, AXIS_NUMBER)
if mechGain <= 0:
raise Exception('Mechanical gain should be a positive value.')
if isinstance(drives, list) != isinstance(directions, list):
raise Exception("Drives and directions must be of the same type.")
if not isinstance(drives, list):
drives = [drives]
if _parent is None:
_parent = drives[0]
directions = [directions]
if _parent not in drives:
raise Exception("parent drive is not in drives list")
if len(drives)>len(self.brakeStatus_control):
raise Exception("Number of Drives can not exceed {}".format(len(self.brakeStatus_control)))
if len(drives) != len(directions):
raise Exception("drives and directions are not the same length.")
for a, b in zip(drives, directions):
self._restrictAxisValue(a)
self._restrictInputValue("direction", b, DIRECTION)
self.mech_gain[a] = float(mechGain)
self.direction[a] = b
self.u_step[a] = microSteps
self.steps_mm[a] = self.deduce_steps_per_mm(self.mech_gain[a],
self.u_step[a],
self.direction[a])
payload = {
"gain": mechGain,
"drives": drives,
"directions": directions,
"motorCurrent": motorCurrent,
"loop": loop,
"microSteps": microSteps,
"tuningProfile": tuningProfile,
"parent": _parent,
"motorSize":_motorSize
}
reply = self.myGCode.__sendConfigToSmartDrives__(_parent, json.dumps(payload))
if "ok" not in reply:
raise Exception('Error while talking to the smartDrives')
return self
# ------------------------------------------------------------------------
# Determines if the io-expander with the given id is available
#
# @param device - The io-expander device identifier
    # @return - True if the io-expander exists; False otherwise
def isIoExpanderAvailable(self, device) :
return self.myIoExpanderAvailabilityState[ device-1 ]
def detectIOModules(self):
'''
desc: Returns a dictionary containing all detected IO Modules.
note: For more information, please see the digital IO datasheet <a href="https://vention.io/docs/datasheets/digital-io-module-datasheet-52">here</a>
returnValue: Dictionary with keys of format "Digital IO Network Id [id]" and values [id] where [id] is the network IDs of all connected digital IO modules.
returnValueType: Dictionary
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: digitalRead.py
'''
foundIOModules = {}
numIOModules = 0
        # Note : Delay is needed for MQTT callbacks to get triggered (in case detectIOModules is the first function called after instantiating the MachineMotion object)
time.sleep(0.5)
# IO module possible addresses are 1, 2, 3
for ioDeviceID in range(1,4):
if self.isIoExpanderAvailable(ioDeviceID):
foundIOModules["Digital IO Network Id " + str(ioDeviceID)] = ioDeviceID
numIOModules = numIOModules + 1
if numIOModules == 0:
print("No IO Modules found.")
else:
return foundIOModules
return
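    # Usage sketch (assumes at least one digital IO module is connected):
    #   modules = mm.detectIOModules()
    #   if modules:
    #       for name, networkId in modules.items():
    #           print(name, "->", networkId)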
def digitalRead(self, deviceNetworkId, pin) :
'''
        desc: Reads the state of a digital IO module's input pin.
params:
deviceNetworkId:
                desc: The IO Module's device network ID. It can be found printed on the product sticker on the back of the digital IO module.
type: Integer
pin:
desc: The index of the input pin.
type: Integer
returnValue: Returns 1 if the input pin is logic HIGH (24V) and returns 0 if the input pin is logic LOW (0V).
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: digitalRead.py
note: On older digital IO modules, the pin labels on the digital IO module (pin 1, pin 2, pin 3, pin 4) correspond in software to (0, 1, 2, 3). Therefore, digitalRead(deviceNetworkId, 2) will read the value on input pin 3.
'''
self.isIoExpanderInputIdValid( deviceNetworkId, pin ) # Enforce restrictions on IO-Expander ID and pin number
if (not hasattr(self, 'digitalInputs')):
self.digitalInputs = {}
if (not deviceNetworkId in self.digitalInputs):
self.digitalInputs[deviceNetworkId] = {}
if (not pin in self.digitalInputs[deviceNetworkId]):
self.digitalInputs[deviceNetworkId][pin] = 0
return self.digitalInputs[deviceNetworkId][pin]
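    # Usage sketch (hypothetical module with network ID 1; pins are 0-indexed):
    #   if mm.digitalRead(1, 0) == 1:
    #       print("Input pin 0 is HIGH (24V)")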
def digitalWrite(self, deviceNetworkId, pin, value) :
'''
        desc: Sets the voltage of a digital IO module's output pin to either logic HIGH (24V) or logic LOW (0V).
params:
deviceNetworkId:
                desc: The IO Module's device network ID. It can be found printed on the product sticker on the back of the digital IO module.
type: Integer
pin:
desc: The output pin number to write to.
type: Integer
value:
                desc: Writing 1 or HIGH will set the digital output to 24V; writing 0 will set the digital output to 0V.
type: Integer
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: digitalWrite.py
note: Output pins maximum sourcing current is 75 mA and the maximum sinking current is 100 mA. On older digital IO modules, the pin labels on the digital IO module (pin 1, pin 2, pin 3, pin 4) correspond in software to (0, 1, 2, 3). Therefore, digitalWrite(deviceNetworkId, 2, 1) will set output pin 3 to 24V.
'''
self.isIoExpanderOutputIdValid( deviceNetworkId, pin ) # Enforce restrictions on IO-Expander ID and pin number
self.myMqttClient.publish('devices/io-expander/' + str(deviceNetworkId) + '/digital-output/' + str(pin), '1' if value else '0', retain=True)
return
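    # Usage sketch (hypothetical module with network ID 1): pulse output pin 2.
    #   mm.digitalWrite(1, 2, 1)  # drive pin 2 to 24V
    #   time.sleep(0.5)
    #   mm.digitalWrite(1, 2, 0)  # back to 0V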
def setPowerSwitch(self, deviceNetworkId, switchState) :
'''
desc: Sets a switch of a power switch module to ON (closed) or OFF (open).
params:
deviceNetworkId:
desc: Power switch device network ID. It can be found printed on the dipswitch of the power switch module.
type: Integer
            switchState:
                desc: Writing POWER_SWITCH.ON will set the switch to closed, writing POWER_SWITCH.OFF will set the switch to open.
                type: Boolean or String of the POWER_SWITCH class
compatibility: MachineMotion v2.
exampleCodePath: powerSwitch.py
'''
if not self.isMMv2 :
raise Exception("The function setPowerSwitch is only supported on MachineMotion v2.")
self.isIoExpanderIdValid(deviceNetworkId)
if isinstance(switchState, bool): # accept boolean.
switchState = POWER_SWITCH.ON if switchState else POWER_SWITCH.OFF
else:
self._restrictInputValue("switchState", switchState, POWER_SWITCH)
self.myMqttClient.publish('devices/power-switch/' + str(deviceNetworkId) + '/digital-output/0', switchState, retain=True)
return
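    # Usage sketch (v2 only; hypothetical device ID 5). Booleans are accepted
    # and coerced to POWER_SWITCH values:
    #   mm.setPowerSwitch(5, True)              # close the switch
    #   mm.setPowerSwitch(5, POWER_SWITCH.OFF)  # open the switch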
def waitOnPushButton(self, deviceNetworkId, button, state=PUSH_BUTTON.STATE.PUSHED, timeout=None) :
'''
desc: Wait until a push button has reached a desired state
params:
deviceNetworkId:
desc: The Push-Button module's device network ID. This is set on the dipswitch of the module.
type: Integer
button:
desc: The address of the button (PUSH_BUTTON.COLOR.BLACK or PUSH_BUTTON.COLOR.WHITE) you want to read
see PUSH_BUTTON class
type: Integer of the PUSH_BUTTON.COLOR class
state:
desc: The state of the push button (PUSH_BUTTON.STATE.PUSHED or PUSH_BUTTON.STATE.RELEASED) necessary to proceed
see PUSH_BUTTON class
type: String of the PUSH_BUTTON.STATE class
default: By default, this function will wait until the desired push button is PUSH_BUTTON.STATE.PUSHED.
timeout:
desc: The maximum time (seconds) the function will wait until it returns.
If no timeout is passed, the function will wait indefinitely.
type: Integer, Float or None
default: None
returnValue: `True` if push button has reached the desired state, `False` if the timeout has been reached.
compatibility: MachineMotion v2.
exampleCodePath: pushButton.py
'''
if not self.isMMv2:
raise Exception("The function waitOnPushButton is only supported on MachineMotion v2.")
self.isIoExpanderIdValid( deviceNetworkId )
self._restrictInputValue( "button", button, PUSH_BUTTON.COLOR )
self._restrictInputValue( "state", state, PUSH_BUTTON.STATE )
deviceNetworkId = str(deviceNetworkId)
button = str(button)
if (not hasattr(self, 'pushButtonStates')):
self.pushButtonStates = {}
if (not deviceNetworkId in self.pushButtonStates):
self.pushButtonStates[deviceNetworkId] = {}
if (not button in self.pushButtonStates[deviceNetworkId]):
self.pushButtonStates[deviceNetworkId][button] = "unknown"
        if not (isinstance(timeout, (int, float)) or timeout is None):
            raise Exception("timeout must be of type float, int or None!")
intervalSeconds = 0.1
        if timeout is not None:
elapsedSeconds = 0
            while (self.pushButtonStates[deviceNetworkId][button] != state and elapsedSeconds < timeout):
                elapsedSeconds += intervalSeconds
                time.sleep(intervalSeconds)
            if self.pushButtonStates[deviceNetworkId][button] != state:
                return False
else:
while self.pushButtonStates[deviceNetworkId][button] != state:
time.sleep(intervalSeconds)
return True
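    # Usage sketch (v2 only; hypothetical device ID 1): block for up to 10
    # seconds until the black button is pushed.
    #   pushed = mm.waitOnPushButton(1, PUSH_BUTTON.COLOR.BLACK, timeout=10)
    #   if not pushed:
    #       print("Timed out waiting for the button")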
def bindPushButtonEvent (self, deviceNetworkId, button, callback_function) :
'''
desc: Configures a user-defined function to execute immediately after a change of state of a push button module button
params:
deviceNetworkId:
desc: The Push-Button module's device network ID. This is set on the dipswitch of the module.
type: Integer
button:
desc: The address of the button (PUSH_BUTTON.COLOR.BLACK or PUSH_BUTTON.COLOR.WHITE) you want to read
see PUSH_BUTTON class
type: Integer of the PUSH_BUTTON.COLOR class
callback_function:
desc: The function to be executed after a push button changes state.
type: function
note: The callback function used will be executed in a new thread.
compatibility: MachineMotion v2.
exampleCodePath: pushButton.py
'''
if not self.isMMv2:
raise Exception("The function bindPushButtonEvent is only supported on MachineMotion v2.")
self.isIoExpanderIdValid( deviceNetworkId )
self._restrictInputValue( "button", button, PUSH_BUTTON.COLOR )
if not callable(callback_function):
raise Exception("callback_function must be of type function!")
self.pushButtonCallbacks[str(deviceNetworkId)][str(button)] = callback_function
return
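    # Usage sketch (v2 only): the callback receives the new state and runs in
    # its own thread, so it should be thread-safe.
    #   def onButton(state):
    #       print("white button is now", state)
    #   mm.bindPushButtonEvent(1, PUSH_BUTTON.COLOR.WHITE, onButton)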
def readPushButton(self, deviceNetworkId, button) :
'''
desc: Reads the state of a Push-Button module button.
params:
deviceNetworkId:
desc: The Push-Button module's device network ID. This is set on the dipswitch of the module.
                type: Integer
button:
desc: The address of the button (PUSH_BUTTON.COLOR.BLACK or PUSH_BUTTON.COLOR.WHITE) you want to read
see PUSH_BUTTON class
type: Integer of the PUSH_BUTTON.COLOR class
        returnValue: Returns PUSH_BUTTON.STATE.RELEASED if the button is released and PUSH_BUTTON.STATE.PUSHED if the button is pushed.
compatibility: MachineMotion v2.
exampleCodePath: pushButton.py
'''
if not self.isMMv2:
raise Exception("The function waitOnPushButton is only supported on MachineMotion v2.")
self.isIoExpanderIdValid( deviceNetworkId ) # Enforce restrictions on Push-Button
self._restrictInputValue("button", button, PUSH_BUTTON.COLOR)
deviceNetworkId = str(deviceNetworkId)
button = str(button)
if (not hasattr(self, 'pushButtonStates')):
self.pushButtonStates = {}
if (not deviceNetworkId in self.pushButtonStates):
self.pushButtonStates[deviceNetworkId] = {}
if (not button in self.pushButtonStates[deviceNetworkId]):
self.pushButtonStates[deviceNetworkId][button] = "unknown"
return self.pushButtonStates[deviceNetworkId][button]
def readEncoder(self, encoder, readingType=ENCODER_TYPE.real_time) :
'''
desc: Returns the last received encoder position in counts.
params:
encoder:
desc: The identifier of the encoder to read
type: Integer
readingType:
desc: Either 'real time' or 'stable'. In 'real time' mode, readEncoder will return the most recently received encoder information. In 'stable' mode, readEncoder will update its return value only after the encoder output has stabilized around a specific value, such as when the axis has stopped motion.
type: String
returnValue: The current position of the encoder, in counts. The encoder has 3600 counts per revolution.
returnValueType: Integer
compatibility: MachineMotion v1 only.
exampleCodePath: readEncoder.py
        note: The encoder position returned by this function may be delayed by up to 250 ms due to internal propagation delays.
'''
if self.isMMv2 :
raise Exception("The function readEncoder is not supported on MachineMotion v2.")
self._restrictInputValue("readingType", readingType, ENCODER_TYPE)
self.isEncoderIdValid(encoder) # Enforce restrictions on encoder ID
if readingType == ENCODER_TYPE.real_time:
return self.myEncoderRealtimePositions[encoder]
elif readingType == ENCODER_TYPE.stable:
return self.myEncoderStablePositions[encoder]
return
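    # Usage sketch (v1 only; hypothetical encoder 0): convert counts to turns
    # using the 3600 counts-per-revolution figure documented above.
    #   counts = mm.readEncoder(0, ENCODER_TYPE.stable)
    #   print("turns:", counts / 3600.0)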
# ------------------------------------------------------------------------
# Reacts to an eStop event
#
    # @param {bool} status - the current eStop status
# @return : call to callback function
def eStopEvent(self, status) :
self.eStopCallback(status)
return
def triggerEstopWithMsg (self, msg="python-api") :
        # This function triggers an estop and sends a custom message to the service.
        # The message should be a string, which will be logged by the eStop service
        # to allow for easier error handling.
        # Default return value: if nothing succeeds, the function returns False.
        q = [] # A list is used to share a variable with the response thread
        q.append(False)
        def mqttResponse(q) :
            # Wait for the response and replace the default return value
            q.pop()
            return_value = self.__parseMessage(MQTTsubscribe.simple(MQTT.PATH.ESTOP_TRIGGER_RESPONSE, retained=False, hostname=self.IP).payload)
            q.append(return_value)
            return
mqttResponseThread = threading.Thread(target = mqttResponse, args=(q,))
mqttResponseThread.daemon = True
mqttResponseThread.start()
        # Adding a delay to make sure the MQTT simple function is launched before the publish is made. Quick fix for a bug in the App Launcher.
time.sleep(0.2)
# Publish trigger request on MQTT
self.myMqttClient.publish(MQTT.PATH.ESTOP_TRIGGER_REQUEST, msg) # we pass a message to the eStop service, for debugging
mqttResponseThread.join(MQTT.TIMEOUT)
if mqttResponseThread.is_alive() :
raise Exception('eStop is still not triggered after ' + str(MQTT.TIMEOUT) + ' seconds')
else :
return q.pop()
def triggerEstop (self) :
'''
desc: Triggers the MachineMotion software emergency stop, cutting power to all drives and enabling brakes (if any). The software E stop must be released (using releaseEstop()) in order to re-enable the machine.
returnValue: The success of the operation.
returnValueType: Boolean
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: eStop.py
'''
return self.triggerEstopWithMsg()
def releaseEstop (self) :
'''
desc: Releases the software E-stop and provides power back to the drives.
returnValue: The success of the operation.
returnValueType: Boolean
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: eStop.py
'''
        # Default return value: if nothing succeeds, the function returns False.
        q = [] # A list is used to share a variable with the response thread
        q.append(False)
        def mqttResponse(q) :
            # Wait for the response and replace the default return value
            q.pop()
            return_value = self.__parseMessage(MQTTsubscribe.simple(MQTT.PATH.ESTOP_RELEASE_RESPONSE, retained=False, hostname=self.IP).payload)
            q.append(return_value)
            return
mqttResponseThread = threading.Thread(target = mqttResponse, args=(q,))
mqttResponseThread.daemon = True
mqttResponseThread.start()
        # Adding a delay to make sure the MQTT simple function is launched before the publish is made. Quick fix for a bug in the App Launcher.
time.sleep(0.2)
# Publish release request on MQTT
self.myMqttClient.publish(MQTT.PATH.ESTOP_RELEASE_REQUEST, "") # payload message is not important
mqttResponseThread.join(MQTT.TIMEOUT)
if mqttResponseThread.is_alive():
raise Exception('eStop is still not released after ' + str(MQTT.TIMEOUT) + ' seconds')
else :
return q.pop()
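    # Usage sketch of a full software eStop cycle (releaseEstop is typically
    # followed by resetSystem, defined below, before motion resumes):
    #   mm.triggerEstop()
    #   ...
    #   mm.releaseEstop()
    #   mm.resetSystem()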
def resetSystem (self) :
'''
desc: Resets the system after an eStop event
returnValue: The success of the operation.
returnValueType: Boolean
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: eStop.py
'''
        # Default return value: if nothing succeeds, the function returns False.
        q = [] # A list is used to share a variable with the response thread
        q.append(False)
        def mqttResponse(q, should_kill_thread_function) :
            # Wait for the response and replace the default return value
            q.pop()
            return_value = self.__parseMessage(MQTTsubscribe.simple(MQTT.PATH.ESTOP_SYSTEMRESET_RESPONSE, retained=False, hostname=self.IP).payload)
if self.isMMv2 and return_value :
# If the resetSystem was successful, we need to wait for the drives to be energized on MMv2 (takes approximately 3sec)
while not self.areSmartDrivesReady :
if should_kill_thread_function() :
return
time.sleep(0.1)
q.append(return_value)
return
should_kill_thread = False
mqttResponseThread = threading.Thread(target = mqttResponse, args=(q, lambda : should_kill_thread))
mqttResponseThread.daemon = True
mqttResponseThread.start()
        # Adding a delay to make sure the MQTT simple function is launched before the publish is made. Quick fix for a bug in the App Launcher.
time.sleep(0.2)
# Publish reset system request on MQTT
self.myMqttClient.publish(MQTT.PATH.ESTOP_SYSTEMRESET_REQUEST, "") # payload message is not important
mqttResponseThread.join(MQTT.TIMEOUT)
if mqttResponseThread.is_alive():
should_kill_thread = True # Kill the thread waiting on MQTT topic
raise Exception('System is still not ready after ' + str(MQTT.TIMEOUT) + ' seconds')
else :
return q.pop()
def bindeStopEvent (self, callback_function) :
'''
        desc: Configures a user-defined function to execute immediately after an E-stop event.
params:
callback_function:
type: function
desc: The function to be executed after an e-stop is triggered or released.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: eStop.py
'''
self.eStopCallback = callback_function
self.eStopCallback(self.estopStatus)
return
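    # Usage sketch: the callback is invoked once immediately with the current
    # status, then again on every eStop trigger or release.
    #   def onEstop(status):
    #       print("eStop status:", status)
    #   mm.bindeStopEvent(onEstop)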
def lockBrake (self, aux_port_number, safety_adapter_presence = False) :
'''
        desc: Locks the brake by shutting off the power of the designated AUX port of the MachineMotion (0V).
params:
aux_port_number:
type: Integer
desc: The number of the AUX port the brake is connected to.
safety_adapter_presence:
type: Boolean
                desc: Whether a yellow safety adapter is plugged in between the brake cable and the AUX port.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: controlBrakes.py
note: This function is compatible only with V1F and more recent MachineMotions.
'''
self._restrictBrakePort(aux_port_number)
# The safety_adapter_presence flag and specifically its default value are incompatible with MM v2. It must always be set to true.
if self.isMMv2 and safety_adapter_presence == False :
raise Exception("The 'safety_adapter_presence' flag must be set to True for MachineMotion v2.")
topic = MQTT.PATH.AUX_PORT_SAFETY if safety_adapter_presence else MQTT.PATH.AUX_PORT_POWER
self.myMqttClient.publish(topic + '/' + str(aux_port_number) + '/request', '0V')
return
def unlockBrake (self, aux_port_number, safety_adapter_presence = False) :
'''
        desc: Unlocks the brake by powering on the designated AUX port of the MachineMotion (24V).
params:
aux_port_number:
type: Integer
desc: The number of the AUX port the brake is connected to.
safety_adapter_presence:
type: Boolean
                desc: Whether a yellow safety adapter is plugged in between the brake cable and the AUX port.
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: controlBrakes.py
note: This function is compatible only with V1F and more recent MachineMotions.
'''
self._restrictBrakePort(aux_port_number)
# The safety_adapter_presence flag and specifically its default value are incompatible with MM v2. It must always be set to true.
if self.isMMv2 and safety_adapter_presence == False :
raise Exception("The 'safety_adapter_presence' flag must be set to True for MachineMotion v2.")
topic = MQTT.PATH.AUX_PORT_SAFETY if safety_adapter_presence else MQTT.PATH.AUX_PORT_POWER
self.myMqttClient.publish(topic + '/' + str(aux_port_number) + '/request', '24V')
return
def getBrakeState (self, aux_port_number, safety_adapter_presence = False) :
'''
        desc: Reads the current state of the brake connected to a given AUX port of the MachineMotion.
params:
aux_port_number:
type: Integer
desc: The number of the AUX port the brake is connected to.
safety_adapter_presence:
type: Boolean
                desc: Whether a yellow safety adapter is plugged in between the brake cable and the AUX port.
returnValue: The current state of the brake, as determined according to the current voltage of the AUX port (0V or 24V). The returned String can be "locked", "unlocked", or "unknown" (for MachineMotions prior to the V1F hardware version), as defined by the BRAKE_STATES class.
returnValueType: String
compatibility: MachineMotion v1 and MachineMotion v2.
exampleCodePath: controlBrakes.py
note: This function is compatible only with V1F and more recent MachineMotions.
'''
self._restrictBrakePort(aux_port_number)
# The safety_adapter_presence flag and specifically its default value are incompatible with MM v2. It must always be set to true.
if self.isMMv2 and safety_adapter_presence == False :
raise Exception("The 'safety_adapter_presence' flag must be set to True for MachineMotion v2.")
voltage = self.brakeStatus_safety[aux_port_number-1] if safety_adapter_presence else self.brakeStatus_control[aux_port_number-1]
if voltage == '0V' : return BRAKE_STATES.locked
elif voltage == '24V' : return BRAKE_STATES.unlocked
else : return BRAKE_STATES.unknown
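    # Usage sketch (hypothetical AUX port 1 with a safety adapter). The state
    # is read from cached MQTT status, so allow a moment for it to update:
    #   mm.unlockBrake(1, safety_adapter_presence=True)
    #   time.sleep(0.2)
    #   print(mm.getBrakeState(1, safety_adapter_presence=True))  # "unlocked"
    #   mm.lockBrake(1, safety_adapter_presence=True)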
# ------------------------------------------------------------------------
    # Subscribe to the MQTT topics of interest on each (re)connection.
#
# @param client - The MQTT client identifier (us)
    # @param userData - The user data we supplied on registration (none)
# @param flags - Connection flags
# @param rc - The connection return code
def __onConnect(self, client, userData, flags, rc):
if rc == 0:
self.myMqttClient.subscribe('devices/+/+/available')
self.myMqttClient.subscribe('devices/+/+/digital-input/#')
self.myMqttClient.subscribe('devices/encoder/+/realtime-position')
self.myMqttClient.subscribe('devices/encoder/+/stable-position')
self.myMqttClient.subscribe(MQTT.PATH.ESTOP_STATUS)
self.myMqttClient.subscribe(MQTT.PATH.AUX_PORT_SAFETY + '/+/status')
self.myMqttClient.subscribe(MQTT.PATH.AUX_PORT_POWER + '/+/status')
self.myMqttClient.subscribe(MQTT.PATH.SMARTDRIVES_READY)
return
# ------------------------------------------------------------------------
# Update our internal state from the messages received from the MQTT broker
#
# @param client - The MQTT client identifier (us)
    # @param userData - The user data we supplied on registration (none)
    # @param msg - The MQTT message received
def __onMessage(self, client, userData, msg):
        # try/except to make __onMessage robust to garbage MQTT messages
try:
# Custom callback list for MachineApps
for callback in self.mqttCallbacks:
callback(msg.topic, msg.payload.decode('utf-8'))
topicParts = msg.topic.split('/')
if topicParts[0]=="devices":
device = int( topicParts[2] )
device_str = topicParts[2]
deviceType = topicParts[1]
if (deviceType == 'push-button'):
self.isIoExpanderIdValid(device)
if (topicParts[3] == 'digital-input'):
button = int ( topicParts[4] )
button_str = topicParts[4]
self.isPushButtonInputIdValid(device,button)
state = self.__parseMessage(msg.payload, jsonLoads=False)
self._restrictInputValue("state", state, PUSH_BUTTON.STATE)
if (not hasattr(self, 'pushButtonStates')):
self.pushButtonStates = {}
if (not device_str in self.pushButtonStates):
self.pushButtonStates[device_str] = {}
if (not button_str in self.pushButtonStates[device_str]):
self.pushButtonStates[device_str][button_str] = {}
self.pushButtonStates[device_str][button_str] = state
                        # To free up the MQTT thread, run the callback in a new thread
cbThread = threading.Thread(target=self.pushButtonCallbacks[device_str][button_str], args=[state])
cbThread.start()
return
if (deviceType == 'io-expander'):
self.isIoExpanderIdValid(device)
if (topicParts[3] == 'available'):
availability = self.__parseMessage(msg.payload)
if (availability):
self.myIoExpanderAvailabilityState[device-1] = True
return
else:
self.myIoExpanderAvailabilityState[device-1] = False
return
pin = int( topicParts[4] )
self.isIoExpanderInputIdValid(device, pin) # Enforce restrictions on IO-Expander ID and pin number
value = int(self.__parseMessage(msg.payload))
if (not hasattr(self, 'digitalInputs')):
self.digitalInputs = {}
if (not device in self.digitalInputs):
self.digitalInputs[device] = {}
self.digitalInputs[device][pin]= value
return
elif (deviceType == 'encoder'):
device = int( topicParts[2] )
position_type = topicParts[3]
position = float( self.__parseMessage(msg.payload) )
if position_type == ENCODER_TYPE.real_time :
self.myEncoderRealtimePositions[device] = position
elif position_type == ENCODER_TYPE.stable :
self.myEncoderStablePositions[device] = position
return
elif (topicParts[0] == MQTT.PATH.ESTOP) :
if (topicParts[1] == "status") :
self.estopStatus = self.__parseMessage(msg.payload)
self.eStopEvent(self.estopStatus)
return
elif (topicParts[0] == MQTT.PATH.AUX_PORT_POWER) :
if (topicParts[2] == "status") :
aux_port = int( topicParts[1] )
self.brakeStatus_control[aux_port-1] = self.__parseMessage(msg.payload, jsonLoads=False)
elif (topicParts[0] == MQTT.PATH.AUX_PORT_SAFETY) :
if (topicParts[2] == "status") :
aux_port = int( topicParts[1] )
self.brakeStatus_safety[aux_port-1] = self.__parseMessage(msg.payload, jsonLoads=False)
elif (msg.topic == MQTT.PATH.SMARTDRIVES_READY) :
self.areSmartDrivesReady = self.__parseMessage(msg.payload)
except Exception as e:
stderr(e)
return
def __onDisconnect(self, client, userData, rc):
#print("Disconnected with rtn code [%d]"% (rc))
return
########################
######## LEGACY ########
########################
    # All the functions below are left only for legacy and backwards compatibility purposes.
    # Vention does not advise using any of them.
#
# Function that indicates if the GCode communication port is ready to send another command.
# @status
#
def isReady(self):
return True
#return self.myGCode.__isReady__()
def emitStop(self):
return self.stopAllMotion()
def emitHomeAll(self):
if not self.isMMv2:
allDrives = 3
elif self.isMMv2OneDrive:
allDrives = 1
else:
allDrives = 4
for drive in range(1,allDrives+1):
retVal = self.moveToHome(drive)
self.waitForMotionCompletion()
return retVal
def emitHome(self,axis):
retVal = self.moveToHome(axis)
self.waitForMotionCompletion()
return retVal
def emitSpeed(self, speed, units = UNITS_SPEED.mm_per_sec):
self.setSpeed(speed, units)
def emitAcceleration(self, acceleration, units=UNITS_ACCEL.mm_per_sec_sqr):
self.setAcceleration(acceleration, units)
def emitAbsoluteMove(self, axis, position):
self.moveToPosition(axis, position)
def emitCombinedAxesAbsoluteMove(self, axes, positions):
self.moveToPositionCombined(axes, positions)
def emitRelativeMove(self, axis, direction, distance):
self._restrictInputValue("direction", direction, DIRECTION)
if direction == DIRECTION.NEGATIVE:
distance = -float(distance)
self.moveRelative(axis, distance)
def emitCombinedAxesRelativeMove(self, axes, directions, distances):
if (not isinstance(axes, list) or not isinstance(directions, list) or not isinstance(distances, list)):
raise TypeError("Axes, Directions and Distances must be lists")
# Transmit move command
tempDistances=[-x if direction == DIRECTION.NEGATIVE else x for direction,x in zip(directions,distances)]
self.moveRelativeCombined(axes, tempDistances)
def emitCombinedAxisRelativeMove(self, axes, directions, distances):
self.emitCombinedAxesRelativeMove(axes, directions, distances)
def setContinuousMove(self, axis, speed, accel = 100):
self.moveContinuous(axis, speed, accel)
def stopContinuousMove(self, axis, accel = 100) :
self.stopMoveContinuous(axis, accel)
def configMachineMotionIp(self, mode = None, machineIp = None, machineNetmask = None, machineGateway = None):
# Note : This function has been deprecated. Please use the ControlCenter to configure the networking.
# '''
# desc: Set up the required network information for the Machine Motion controller. The router can be configured in either DHCP mode or static mode.
# params:
# mode:
# desc: Sets Network Mode to either DHCP or static addressing. Either <code>NETWORK_MODE.static</code> or <code>NETWORK_MODE.dhcp</code>
# type: Constant
# machineIp:
# desc: The static IP Address given to the controller. (Required if mode = <code>NETWORK_MODE.static</code>)
# type: String
# machineNetmask:
# desc: The netmask IP Address given to the controller. (Required if mode = <code>NETWORK_MODE.static</code>)
# type: String
# machineGateway:
# desc: The gateway IP Address given to the controller. (Required if mode = <code>NETWORK_MODE.static</code>)
# type: String
# Note: All Strings expect the format "XXX.XXX.XXX.XXX". To connect the controller to the internet, the gateway IP should be the same IP as your LAN router.
# exampleCodePath: configMachineMotionIp.py
# '''
if(mode == NETWORK_MODE.static):
if (machineIp is None) or (machineNetmask is None) or (machineGateway is None) :
stderr("NETWORK ERROR: machineIp, machineNetmask and machineGateway cannot be left blank in static mode")
return False
# Create a new object and augment it with the key value.
if mode is not None : self.myConfiguration["mode"] = mode
if machineIp is not None : self.myConfiguration["machineIp"] = machineIp
if machineNetmask is not None : self.myConfiguration["machineNetmask"] = machineNetmask
if machineGateway is not None : self.myConfiguration["machineGateway"] = machineGateway
HTTPSend(self.IP + ":8000", "/configIp", json.dumps(self.myConfiguration))
time.sleep(1)
return
def configMinMaxHomingSpeed(self, axes, minspeeds, maxspeeds, units = UNITS_SPEED.mm_per_sec):
# Note : This function has been deprecated.
# '''
# desc: Sets the minimum and maximum homing speeds for each axis.
# params:
# axes:
# desc: a list of the axes that require minimum and maximum homing speeds.
# type: List
# minspeeds:
# desc: the minimum speeds for each axis.
# type: List
# maxspeeds:
# desc: the maximum speeds for each axis, in the same order as the axes parameter
# type: List
# exampleCodePath: configHomingSpeed.py
    # note: This function can be used to set safe limits on homing speed. Because homing speed is configured only through the software API, this safeguards against developers accidentally modifying homing speed to unsafe levels.
# '''
try:
axes = list(axes)
minspeeds = list(minspeeds)
maxspeeds = list(maxspeeds)
except TypeError:
axes = [axes]
minspeeds = [minspeeds]
maxspeeds = [maxspeeds]
if len(axes) != len(minspeeds) or len(axes) != len(maxspeeds):
class InputsError(Exception):
pass
raise InputsError("axes and speeds must be of same length")
for axis in axes:
self._restrictInputValue("axis", axis, AXIS_NUMBER)
gCodeCommand = "V1"
for idx, axis in enumerate(axes):
if units == UNITS_SPEED.mm_per_sec:
min_speed_mm_per_min = minspeeds[idx] * 60
max_speed_mm_per_min = maxspeeds[idx] * 60
            elif units == UNITS_SPEED.mm_per_min:
                min_speed_mm_per_min = minspeeds[idx]
                max_speed_mm_per_min = maxspeeds[idx]
            else:
                raise Exception("Invalid units for homing speed.")
if min_speed_mm_per_min < HARDWARE_MIN_HOMING_FEEDRATE:
raise Exception("Your desired homing speed of " + str(min_speed_mm_per_min) + "mm/min can not be less than " + str(HARDWARE_MIN_HOMING_FEEDRATE) + "mm/min (" + str(HARDWARE_MIN_HOMING_FEEDRATE/60) + "mm/sec).")
if max_speed_mm_per_min > HARDWARE_MAX_HOMING_FEEDRATE:
raise Exception("Your desired homing speed of " + str(max_speed_mm_per_min) + "mm/min can not be greater than " + str(HARDWARE_MAX_HOMING_FEEDRATE) + "mm/min (" + str(HARDWARE_MAX_HOMING_FEEDRATE/60) + "mm/sec)")
gCodeCommand = gCodeCommand + " " + self.myGCode.__getTrueAxis__(axis) + str(min_speed_mm_per_min) + ":" + str(max_speed_mm_per_min)
self.myGCode.__emitEchoOk__(gCodeCommand)
return
def saveData(self, key, data) :
# Note : This function has been deprecated.
# '''
# desc: Saves/persists data within the MachineMotion Controller in key - data pairs.
# params:
# key:
    # desc: A string that uniquely identifies the data to save for future retrieval.
# type: String
# data:
# desc: The data to save to the machine. The data must be convertible to JSON format.
# type: String
    # note: The data continues to exist even when the controller is shut off. However, writing to a previously used key will overwrite the previous value.
# exampleCodePath: getData_saveData.py
# '''
# Create a new object and augment it with the key value.
dataPack = {}
dataPack["fileName"] = key
dataPack["data"] = data
# Send the request to MachineMotion
HTTPSend(self.IP + ":8000", "/saveData", json.dumps(dataPack))
time.sleep(0.05)
return
def getData(self, key, callback):
# Note : This function has been deprecated.
# '''
    # desc: Retrieves saved/persisted data from the MachineMotion controller (in key-data pairs). If the controller takes more than 3 seconds to return data, the function will return with a value of "Error - getData took too long" under the given key.
# params:
# key:
    # desc: A unique identifier representing the data to be retrieved
# type: String
# callback:
# desc: Function to callback to process data.
# type: function
# exampleCodePath: getData_saveData.py
# returnValue: A dictionary containing the saved data.
# returnValueType: Dictionary
# '''
callback(HTTPSend(self.IP + ":8000", "/getData", key))
return
def emitDwell(self, milliseconds) :
# Note : This function has been deprecated.
# '''
# desc: Pauses motion for a specified time. This function is non-blocking; your program may accomplish other tasks while the machine is dwelling.
# params:
    #     milliseconds:
# desc: The duration to wait in milliseconds.
# type: Integer
# note: The timer starts after all previous MachineMotion movement commands have finished execution.
# exampleCodePath: emitDwell.py
# '''
self.myGCode.__emitEchoOk__("G4 P"+str(milliseconds))
return
    # This function is left in for legacy purposes. It is not documented because it duplicates the functionality of readEncoder.
def readEncoderRealtimePosition(self, encoder):
self.isEncoderIdValid( encoder ) # Enforce restrictions on encoder ID
return self.myEncoderRealtimePositions[encoder]
# ------------------------------------------------------------------------
# Moves a motor with a certain set of parameters
#
# @param {int} motor - motor # to move
    # @param {float} rotation - number of rotations to perform
# @param {float} speed - motor speed in rotation/sec
# @param {float} accel - motor acceleration in rotation/sec^2
# @param {string} reference - "absolute" (default) or "relative"
    # @param {string} type - "synchronous" (default) or "asynchronous"
# @return {bool} - True if command completed properly
def move(self, motor, rotation = None, speed = None, accel = None, reference = "absolute", type = "synchronous") :
if rotation is not None :
# set motor to position mode
self.myGCode.__emitEchoOk__("V5 " + self.myGCode.__getTrueAxis__(motor) + "1")
if speed is not None :
# send speed command (need to convert rotation/s to mm/min )
self.myGCode.__emitEchoOk__("G0 F" + str(speed * 60 * self.mech_gain[motor]))
if accel is not None :
# send accel command (need to convert rotation/s^2 to mm/s^2)
# Note : Set travel and print acceleration, to impact G0 and G1 commands.
self.myGCode.__emitEchoOk__("M204 T" + str(accel * self.mech_gain[motor]) + " P" + str(accel * self.mech_gain[motor]))
if reference == "absolute" :
# Set to absolute motion mode
self.myGCode.__emitEchoOk__("G90")
# Transmit move command
self.myGCode.__emitEchoOk__("G0 " + self.myGCode.__getTrueAxis__(motor) + str(rotation * self.mech_gain[motor]))
elif reference == "relative" :
# send relative move command
# Set to relative motion mode
self.myGCode.__emitEchoOk__("G91")
# Transmit move command
self.myGCode.__emitEchoOk__("G0 " + self.myGCode.__getTrueAxis__(motor) + str(rotation * self.mech_gain[motor]))
else :
return False
if type == "synchronous" :
self.waitForMotionCompletion()
return True
elif type == "asynchronous" :
return True
else :
if speed is not None and accel is not None :
# set motor to speed mode
self.myGCode.__emitEchoOk__("V5 " + self.myGCode.__getTrueAxis__(motor) + "2")
# Send speed command
self.myGCode.__emitEchoOk__("V4 S" + str(speed * STEPPER_MOTOR.steps_per_turn * self.u_step[motor]) + " A" + str(accel * STEPPER_MOTOR.steps_per_turn * self.u_step[motor]) + " " + self.myGCode.__getTrueAxis__(motor))
else :
return False
return False
# Custom MachineApp template-specific code
def addMqttCallback(self, func):
if not func in self.mqttCallbacks:
self.mqttCallbacks.append(func)
def removeMqttCallback(self, func):
self.mqttCallbacks.remove(func)
def registerInput(self, name, digitalIo, pin):
self.__registeredInputMap[name] = 'devices/io-expander/' + str(digitalIo) + '/digital-input/' + str(pin)
def getInputTopic(self, name):
if not name in self.__registeredInputMap:
return None
return self.__registeredInputMap[name]
class MachineMotionV2(MachineMotion):
'''z-index:99
'''
def __init__(self, machineIp=DEFAULT_IP_ADDRESS.usb_windows):
'''
desc: Constructor of MachineMotionV2 class
params:
machineIp:
desc: IP address of the Machine Motion
type: string of the DEFAULT_IP_ADDRESS class, or other valid IP address
compatibility: MachineMotion v2.
exampleCodePath: moveRelative.py
'''
super(MachineMotionV2, self).__init__(machineIp,
machineMotionHwVersion=MACHINEMOTION_HW_VERSIONS.MMv2)
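# Usage sketch (hypothetical IP; moveRelative is part of the inherited
# MachineMotion API):
#   mm = MachineMotionV2("192.168.7.2")
#   mm.moveRelative(1, 100)  # move axis 1 by 100 mm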
class MachineMotionV2OneDrive(MachineMotion):
'''z-index:98
'''
def __init__(self, machineIp=DEFAULT_IP_ADDRESS.usb_windows):
'''
desc: Constructor of MachineMotionV2OneDrive class
params:
machineIp:
desc: IP address of the Machine Motion
type: string of the DEFAULT_IP_ADDRESS class, or other valid IP address
compatibility: MachineMotion v2 One Drive.
exampleCodePath: oneDriveControl.py
'''
super(MachineMotionV2OneDrive, self).__init__(machineIp,
machineMotionHwVersion=MACHINEMOTION_HW_VERSIONS.MMv2OneDrive)
########################
######## LEGACY ########
########################
# All the classes below are left only for legacy and backwards compatibility purposes.
# Vention does not advise using any of them.
class CONTROL_DEVICE_SIGNALS:
SIGNAL0 = "SIGNAL0"
SIGNAL1 = "SIGNAL1"
SIGNAL2 = "SIGNAL2"
SIGNAL3 = "SIGNAL3"
SIGNAL4 = "SIGNAL4"
SIGNAL5 = "SIGNAL5"
SIGNAL6 = "SIGNAL6"
class CONTROL_DEVICE_TYPE:
IO_EXPANDER_GENERIC = "IO_EXPANDER_GENERIC"
ENCODER = "ENCODER"
class CONTROL_DEVICE_PORTS:
SENSOR4 = "SENSOR4"
SENSOR5 = "SENSOR5"
SENSOR6 = "SENSOR6"
class NETWORK_MODE:
static = "static"
dhcp = "dhcp"
|
EnvironmentManager.py
|
# -*- coding: utf-8 -*-
import threading
from ..config.ConfigurationManager import ConfigurationManager
from ..results.ResultsManager import ResultsManager
from ..execution.ExecutionManager import ExecutionManager
from .EnvironmentProperties import EnvironmentProperties
from ..benchmarks.BenchmarkSuiteInstance import BenchmarkSuiteInstance
from .StandardBenchmarkSuite import StandardBenchmarkSuite
from .SystemInfo import SystemInfo
class EnvironmentManager(ExecutionManager):
__DURATION = 5
def __init__(self):
self.__cpu_measurement = None
self.__video_measurement = None
self.__disk_measurement = None
configuration = ConfigurationManager()
configuration.duration = EnvironmentManager.__DURATION
results = ResultsManager()
super().__init__(configuration, results)
try:
self.__load()
        except Exception:
            # Ignore; loading cached measurements should never fail here.
            pass
@property
def system_info(self):
return SystemInfo()
@property
def cpu_measurement(self):
return self.__cpu_measurement
@property
def video_measurement(self):
return self.__video_measurement
@property
def disk_measurement(self):
return self.__disk_measurement
def measure(self, cpu, disk, video):
        def async_start(func):
            # Run func in a fresh thread but block until it finishes;
            # this isolates each benchmark run without true concurrency.
            _thread = threading.Thread(target=func)
            _thread.start()
            _thread.join()
        try:
            if cpu:
                async_start(self.__measure_cpu)
            if disk:
                async_start(self.__measure_disk)
            if video:
                async_start(self.__measure_video)
            self.__save()
        finally:
            self.stop()
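    # Usage sketch: run the CPU and disk baselines (each takes __DURATION
    # seconds) and read back the cached measurements.
    #   manager = EnvironmentManager()
    #   manager.measure(cpu=True, disk=True, video=False)
    #   print(manager.cpu_measurement, manager.disk_measurement)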
def __load(self):
properties = EnvironmentProperties()
properties.load()
self.__cpu_measurement = properties.get_as_double('CpuMeasurement', 0)
self.__video_measurement = properties.get_as_double('VideoMeasurement', 0)
self.__disk_measurement = properties.get_as_double('DiskMeasurement', 0)
def __save(self):
properties = EnvironmentProperties()
        properties.set_as_double('CpuMeasurement', self.__cpu_measurement)
        properties.set_as_double('VideoMeasurement', self.__video_measurement)
        properties.set_as_double('DiskMeasurement', self.__disk_measurement)
properties.save()
def __measure_cpu(self):
suite = StandardBenchmarkSuite()
instance = BenchmarkSuiteInstance(suite)
instance.unselect_all()
instance.select_by_name(suite.cpu_benchmark.name)
        self.run(instance.is_selected)
        result = self._results.all[0].performance_measurement.average_value if len(self._results.all) else 0
        self.__cpu_measurement = result
def __measure_disk(self):
suite = StandardBenchmarkSuite()
instance = BenchmarkSuiteInstance(suite)
instance.unselect_all()
instance.select_by_name(suite.disk_benchmark.name)
        self.run(instance.is_selected)
        result = self._results.all[0].performance_measurement.average_value if len(self._results.all) else 0
        self.__disk_measurement = result
def __measure_video(self):
suite = StandardBenchmarkSuite()
instance = BenchmarkSuiteInstance(suite)
instance.unselect_all()
instance.select_by_name(suite.video_benchmark.name)
        self.run(instance.is_selected)
        result = self._results.all[0].performance_measurement.average_value if len(self._results.all) else 0
        self.__video_measurement = result
|
halperf.py
|
#!/usr/bin/python3
import argparse
import os
import signal
import sys
import time
from ctypes import *
from threading import Thread, Timer, Lock
verbose = False
# Open C shared libs to the datatypes; TODO: make spec-driven
xdc_so = None
gma_so = None
plock = Lock()
DATA_TYP_POS = 1
DATA_TYP_DIS = 2
DATA_TYP_HB_ORANGE = 13
DATA_TYP_HB_GREEN = 113
class GapsTag(Structure):
_fields_ = [("mux", c_uint),
("sec", c_uint),
("typ", c_uint)]
class ClosureTrailer(Structure):
_fields_ = [('seq', c_uint),
('rqr', c_uint),
('old', c_uint),
('mid', c_ushort),
('crc', c_ushort)]
class Position(Structure):
_fields_ = [("x", c_double),
("y", c_double),
("z", c_double),
("t", ClosureTrailer)]
class Distance(Structure):
_fields_ = [("x", c_double),
("y", c_double),
("z", c_double),
("t", ClosureTrailer)]
class RptTimer(Timer):
def run(self):
while not self.finished.is_set():
self.finished.wait(self.interval)
self.function(*self.args, **self.kwargs)
self.finished.set()
class Stats():
def __init__(self):
self.wincnt = 0
self.totcnt = 0
self.totsec = 0
global_stats = {}
def get_key(d,m,s,t):
return '%s-%d-%d-%d' % (d, int(m), int(s), int(t))
def send(m, s, t, r, interval):
# Context/Socket setup
makesock = xdc_so.xdc_pub_socket
makesock.restype = c_void_p
sock = makesock()
    # Initial values (the Distance trailer is left zero-filled by ctypes)
    pos = Position(-74.574489, 40.695545, 101.9, ClosureTrailer(0,0,0,0,0))
    dis = Distance(-1.021, 2.334, 0.4)
tag = GapsTag(int(m),int(s),int(t))
key = get_key('s', m, s, t)
global_stats[key] = Stats()
slock = Lock()
if int(t) in {DATA_TYP_POS, DATA_TYP_HB_ORANGE, DATA_TYP_HB_GREEN}:
adu = Position(pos.x, pos.y, pos.z, ClosureTrailer(0,0,0,0,0))
    elif int(t) == DATA_TYP_DIS:
adu = Distance(dis.x, dis.y, dis.z, ClosureTrailer(0,0,0,0,0))
else:
raise Exception('unsupported data typ: ' + str(t))
def task(stats,slock):
adu.z += 0.1
xdc_so.xdc_asyn_send(c_void_p(sock), pointer(adu), pointer(tag))
slock.acquire()
stats.wincnt += 1
slock.release()
if verbose:
plock.acquire()
print('%f sent_msg: [%d/%d/%d] -- (%f,%f,%f)' % (time.time(), tag.mux,tag.sec,tag.typ,adu.x,adu.y,adu.z))
plock.release()
def print_stats(stats,tag,slock):
stats.totsec += interval
mst = "%d/%d/%d" % (tag.mux, tag.sec, tag.typ)
plock.acquire()
print("{:5.2f}s | send | {:8s} | {:5d} {:7.2f} Hz | {:8d} {:8.2f} Hz".format(stats.totsec, mst, stats.wincnt, stats.wincnt/interval, stats.totcnt + stats.wincnt, (stats.totcnt + stats.wincnt) / stats.totsec))
plock.release()
slock.acquire()
stats.totcnt += stats.wincnt
stats.wincnt = 0
slock.release()
rtmr = RptTimer(1.0/float(r),task,[global_stats[key], slock])
rtmr.start()
stmr = RptTimer(interval, print_stats, [global_stats[key], tag, slock])
stmr.start()
def recv(m, s, t, interval):
if int(t) in {DATA_TYP_POS, DATA_TYP_HB_ORANGE, DATA_TYP_HB_GREEN}:
adu = Position()
elif int(t) == DATA_TYP_DIS:
adu = Distance()
else:
raise Exception('data type %d not supported' % (int(t)))
print("Subscribed to [%s/%s/%s]" % (m,s,t))
tag = GapsTag(int(m), int(s), int(t))
makesock = xdc_so.xdc_sub_socket
makesock.restype = c_void_p
sock = makesock(tag)
key = get_key('r', m, s, t)
global_stats[key] = Stats()
rlock = Lock()
def print_stats(stats,tag,rlock):
stats.totsec += interval
mst = "%d/%d/%d" % (tag.mux, tag.sec, tag.typ)
plock.acquire()
print("{:5.2f}s | recv | {:8s} | {:5d} {:7.2f} Hz | {:8d} {:8.2f} Hz".format(stats.totsec, mst, stats.wincnt, stats.wincnt/interval, stats.totcnt + stats.wincnt, (stats.totcnt + stats.wincnt) / stats.totsec))
plock.release()
rlock.acquire()
stats.totcnt += stats.wincnt
stats.wincnt = 0
rlock.release()
stmr = RptTimer(interval,print_stats,[global_stats[key],tag,rlock])
stmr.start()
while True:
xdc_so.xdc_blocking_recv(c_void_p(sock), pointer(adu), pointer(tag))
rlock.acquire()
global_stats[key].wincnt += 1
rlock.release()
if verbose:
plock.acquire()
print('%f recv_msg: [%d/%d/%d] -- (%f,%f,%f)' % (time.time(), tag.mux,tag.sec,tag.typ,adu.x,adu.y,adu.z))
plock.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--send', nargs=4, action='append', metavar=('MUX', 'SEC', 'TYP', 'RATE'), help='send cross-domain flow using MUX/SEC/TYP at RATE (Hz)')
parser.add_argument('-r', '--recv', nargs=3, action='append', metavar=('MUX', 'SEC', 'TYP'), help='recv cross-domain flow mapped to MUX/SEC/TYP')
parser.add_argument('-l', metavar=('PATH'), help="path to mission app shared libraries (default=../appgen/6month-demo)", default='../appgen/6month-demo')
parser.add_argument('-x', metavar=('PATH'), help="path to libxdcomms.so (default=../api)", default='../api')
parser.add_argument('-i', metavar=('URI'), help="in URI (default=ipc:///tmp/halpub1)", default='ipc:///tmp/halpub1')
parser.add_argument('-o', metavar=('URI'), help="out URI (default=ipc:///tmp/halsub1)", default='ipc:///tmp/halsub1')
parser.add_argument('--interval', help="reporting interval, default=10s", default=10)
parser.add_argument('-t', help="duration of test in seconds, if not specified, runs indefinitely", default=0)
parser.add_argument('-v', help="verbose mode, logs every message", action='store_true', default=False)
args = parser.parse_args()
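    # Example invocation (hypothetical MUX/SEC/TYP tags; assumes HAL endpoints
    # at the default ipc:// URIs and the shared libraries are built):
    #   ./halperf.py -s 1 1 1 100 -r 2 2 2 --interval 5 -t 60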
xdc_so = CDLL(args.x + '/libxdcomms.so', use_errno=True)
gma_so = CDLL(args.l + '/libgma.so')
# Check verbose mode
    verbose = args.v
# Set the URIs for ZMQ
xdc_so.xdc_ctx()
xdc_so.xdc_set_in(c_char_p((args.i).encode('utf-8')))
xdc_so.xdc_set_out(c_char_p((args.o).encode('utf-8')))
# Register encode/decode functions; TODO: make spec-driven
xdc_so.xdc_register(gma_so.position_data_encode, gma_so.position_data_decode, DATA_TYP_POS)
xdc_so.xdc_register(gma_so.distance_data_encode, gma_so.distance_data_decode, DATA_TYP_DIS)
xdc_so.xdc_register(gma_so.position_data_encode, gma_so.position_data_decode, DATA_TYP_HB_ORANGE)
xdc_so.xdc_register(gma_so.position_data_encode, gma_so.position_data_decode, DATA_TYP_HB_GREEN)
start = time.time()
if args.send:
for s in args.send:
s.append(float(args.interval))
t = Thread(args=s, target=send)
t.start()
if args.recv:
for r in args.recv:
r.append(float(args.interval))
t = Thread(args=r, target=recv)
t.start()
def print_totals():
end = time.time()
print("\n\nMESSAGE TOTALS\n------")
for key in global_stats:
(d,m,s,t) = key.split('-')
d = 'sent' if d == 's' else 'received'
print('%s/%s/%s - %s %d messages' % (m,s,t,d,global_stats[key].totcnt + global_stats[key].wincnt))
print('elapsed time: %.2fs' % (end - start))
def self_kill():
print_totals()
os.kill(os.getpid(), signal.SIGKILL)
if float(args.t) != 0:
tm = Timer(float(args.t), self_kill)
tm.start()
while True:
try:
time.sleep(99999)
except KeyboardInterrupt:
self_kill()
|
shutdown.pyw
|
from PyQt5.QtWidgets import (QMainWindow, QApplication, QPushButton, QWidget, QGroupBox, QLabel,
QCheckBox, QAction, QTabWidget, QVBoxLayout, QHBoxLayout, QGridLayout,
QLineEdit, QTextEdit, QShortcut, QMessageBox)
from PyQt5.QtGui import QIcon, QFont, QKeySequence, QCursor
from PyQt5.QtCore import pyqtSlot, Qt
import os
import sys
import threading
class App(QMainWindow):
def __init__(self):
super().__init__()
self.title = 'Schedule Shutdown'
self.left = 360
self.top = 240
self.width = 640
self.height = 480
self.setFixedSize(self.width, self.height)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.setWindowIcon(QIcon('icon.ico'))
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.shortcut = QShortcut(QKeySequence("Ctrl+W"), self)
self.shortcut.activated.connect(self.close)
self.statusBar().showMessage(' Hibernation command: "shutdown -h"')
self.table_widget = MyTableWidget(self)
self.setCentralWidget(self.table_widget)
self.table_widget.countdown(60, new=True)
self.show()
def closeEvent(self, event):
        self.table_widget.label.setText(' 1:00')  # changing the label makes the running countdown chain stop itself
event.accept() # let the window close
# event.ignore()
class MyTableWidget(QWidget):
def __init__(self, parent):
super(QWidget, self).__init__(parent)
self.layout = QVBoxLayout(self)
# Initialize tab screen
self.tabs = QTabWidget()
self.tab1 = QWidget()
self.tab2 = QWidget()
# self.tabs.resize(300, 200)
tab_font = QFont("Frutiger Next LT CE", 9, QFont.Light)
self.tabs.setFont(tab_font)
# Add tabs
self.tabs.addTab(self.tab1, "Settings")
self.tabs.addTab(self.tab2, "Help")
self.set_tab_1()
self.set_tab_2()
# Add tabs to widget
self.layout.addWidget(self.tabs)
self.setLayout(self.layout)
def set_tab_1(self):
# Create first tab
frutiger = QFont("Frutiger Next LT CE", 11, QFont.Light)
frutiger_light = QFont("Frutiger Next LT CE", 34, QFont.Light)
frutiger_small = QFont("Frutiger Next LT CE", 22)
frutiger_select = QFont("Frutiger Next LT CE", 18, QFont.Normal)
monaco = QFont("monaco", 15, QFont.Normal)
monaco_big = QFont("monaco", 34, QFont.Normal)
monaco_small = QFont("monaco", 24, QFont.Normal)
self.tab1.layout = QVBoxLayout(self)
self.horizontalGroupBox1 = QGroupBox("Set a Schedule")
self.horizontalGroupBox1.setFont(frutiger)
self.horizontalGroupBox1.setStyleSheet("padding: 15px; border: 1px solid lightgray; border-radius: 0")
layout = QGridLayout()
layout.setSpacing(10)
label1 = QLabel('Shutdown after', self)
label1.setFont(frutiger_light)
label1.setStyleSheet("padding: 0; border: 0; padding-right: 0;")
layout.addWidget(label1, 1, 0, 1, 1)
self.textbox = QLineEdit(self)
self.textbox.setPlaceholderText('1')
self.textbox.setFocusPolicy(Qt.StrongFocus)
self.textbox.setFont(monaco_big)
self.textbox.setAlignment(Qt.AlignCenter)
self.textbox.setStyleSheet("padding: 0; border: 0; border-bottom: 2px solid black; border-radius: 0")
self.textbox.returnPressed.connect(self.set_schedule)
layout.addWidget(self.textbox, 1, 1, 1, 1)
label2 = QLabel('minutes.', self)
label2.setFont(frutiger_light)
label2.setStyleSheet("padding: 0; border: 0; padding-right: 0;")
layout.addWidget(label2, 1, 2, 1, 1)
button_shutdown = QPushButton('[ confirm ]', self)
button_shutdown.clicked.connect(self.set_schedule)
button_shutdown.setFont(frutiger)
button_shutdown.setStyleSheet("padding: 0; margin-left: 80px; margin-right: 10px; "
"border: 0; border-bottom: 0px solid lightgray; ")
button_shutdown.setCursor(QCursor(Qt.PointingHandCursor))
layout.addWidget(button_shutdown, 2, 2, 1, 1)
self.box = QCheckBox("hibernate instead of performing a full shutdown")
self.box.setStyleSheet("padding: 0; margin:0; padding-top: 5px; border: 0")
self.box.setFont(frutiger)
self.box.setChecked(True)
layout.addWidget(self.box, 2, 0, 1, 2)
self.horizontalGroupBox1.setLayout(layout)
self.tab1.layout.addWidget(self.horizontalGroupBox1)
self.horizontalGroupBox2 = QGroupBox("Select a Command")
self.horizontalGroupBox2.setFont(frutiger)
self.horizontalGroupBox2.setStyleSheet("padding: 15px; border: 1px solid lightgray; border-radius: 0")
buttons = QGridLayout()
buttons.setSpacing(10)
label_shutdown = QLabel('Shutdown:', self)
label_shutdown.setFont(frutiger_select)
label_shutdown.setStyleSheet("padding: 0; border: 0; padding-right: 0;")
buttons.addWidget(label_shutdown, 0, 0, 1, 1)
self.textbox_shutdown = QLineEdit(self)
self.textbox_shutdown.setText('shutdown -s -f -t 60')
self.textbox_shutdown.setFocusPolicy(Qt.StrongFocus)
self.textbox_shutdown.setFont(monaco)
self.textbox_shutdown.setStyleSheet("padding: 0; border: 0; margin-left: 30px; margin-right: 20px;"
"border-bottom: 1px solid black; border-radius: 0")
self.textbox_shutdown.returnPressed.connect(self.shutdown)
buttons.addWidget(self.textbox_shutdown, 0, 1, 1, 1)
button_shutdown = QPushButton('[ confirm ]', self)
button_shutdown.clicked.connect(self.shutdown)
button_shutdown.setFont(frutiger)
button_shutdown.setStyleSheet("padding: 0; margin-left: 25px; margin-right: 10px; border: 0")
button_shutdown.setCursor(QCursor(Qt.PointingHandCursor))
buttons.addWidget(button_shutdown, 0, 2, 1, 1)
label_restart = QLabel('Restart:', self)
label_restart.setFont(frutiger_select)
label_restart.setStyleSheet("padding: 0; border: 0; padding-right: 0;")
buttons.addWidget(label_restart, 1, 0, 1, 1)
self.textbox_restart = QLineEdit(self)
self.textbox_restart.setText('shutdown -r -f -t 60')
self.textbox_restart.setFocusPolicy(Qt.StrongFocus)
self.textbox_restart.setFont(monaco)
self.textbox_restart.setStyleSheet("padding: 0; border: 0; margin-left: 30px; margin-right: 20px;"
"border-bottom: 1px solid black; border-radius: 0")
self.textbox_restart.returnPressed.connect(self.restart)
buttons.addWidget(self.textbox_restart, 1, 1, 1, 1)
button_restart = QPushButton('[ confirm ]', self)
button_restart.clicked.connect(self.restart)
button_restart.setFont(frutiger)
button_restart.setStyleSheet("padding: 0; margin-left: 25px; margin-right: 10px; border: 0")
button_restart.setCursor(QCursor(Qt.PointingHandCursor))
buttons.addWidget(button_restart, 1, 2, 1, 1)
label_cancel = QLabel('Abort:', self)
label_cancel.setFont(frutiger_select)
label_cancel.setStyleSheet("padding: 0; border: 0; padding-right: 0;")
buttons.addWidget(label_cancel, 2, 0, 1, 1)
self.textbox_cancel = QLineEdit(self)
self.textbox_cancel.setText('shutdown -a')
self.textbox_cancel.setFocusPolicy(Qt.StrongFocus)
self.textbox_cancel.setFont(monaco)
self.textbox_cancel.setStyleSheet("padding: 0; border: 0; margin-left: 30px; margin-right: 20px;"
"border-bottom: 1px solid black; border-radius: 0")
self.textbox_cancel.returnPressed.connect(self.cancel)
buttons.addWidget(self.textbox_cancel, 2, 1, 1, 1)
button_cancel = QPushButton('[ confirm ]', self)
button_cancel.clicked.connect(self.cancel)
button_cancel.setFont(frutiger)
button_cancel.setStyleSheet("padding: 0; margin-left: 25px; margin-right: 10px; border: 0")
button_cancel.setCursor(QCursor(Qt.PointingHandCursor))
buttons.addWidget(button_cancel, 2, 2, 1, 1)
self.horizontalGroupBox2.setLayout(buttons)
self.tab1.layout.addWidget(self.horizontalGroupBox2)
self.horizontalGroupBox3 = QGroupBox("Countdown Timer")
self.horizontalGroupBox3.setFont(frutiger)
self.horizontalGroupBox3.setStyleSheet("padding: 15px; border: 1px solid lightgray; border-radius: 0")
cd = QGridLayout()
cd.setSpacing(10)
label3 = QLabel('Remaining time until Shutdown:', self)
label3.setAlignment(Qt.AlignLeft)
label3.setFont(frutiger_small)
label3.setStyleSheet("padding: 0; padding-top: 10px; border: 0")
cd.addWidget(label3, 2, 0, 1, 2)
self.label = QLabel(' 1:00', self)
self.label.setAlignment(Qt.AlignRight)
self.label.setFont(monaco_small)
self.label.setStyleSheet("padding: 0; padding-top: 5px; border: 0")
cd.addWidget(self.label, 2, 2, 1, 1)
abort = QPushButton()
abort.clicked.connect(self.abort)
abort.setCursor(QCursor(Qt.PointingHandCursor))
abort.setStyleSheet("margin-left: 30px; margin-top: 3px; border: 0")
cd.addWidget(abort, 2, 2, 1, 1)
self.horizontalGroupBox3.setLayout(cd)
self.tab1.layout.addWidget(self.horizontalGroupBox3)
self.tab1.setLayout(self.tab1.layout)
@pyqtSlot()
def on_click(self):
print("on_click")
@pyqtSlot()
def shutdown(self):
text = self.textbox_shutdown.text()
button_reply = QMessageBox.question(self, 'PyQt5 message', "Shut down?\n> {}".format(text),
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if button_reply == QMessageBox.Yes:
# print('Yes clicked.')
os.popen(text)
exit(0)
@pyqtSlot()
def restart(self):
text = self.textbox_restart.text()
button_reply = QMessageBox.question(self, 'PyQt5 message', "Restart?\n> {}".format(text),
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if button_reply == QMessageBox.Yes:
os.popen(text)
exit(0)
@pyqtSlot()
def cancel(self):
text = self.textbox_cancel.text()
self.textbox_cancel.setText('shutdown -a')
os.popen(text)
@pyqtSlot()
def abort(self):
text = self.label.text()
self.label.setText(' ' + text)
@pyqtSlot()
def set_schedule(self):
if not self.textbox.text():
self.textbox.setText('1')
textbox_value = self.textbox.text()
if textbox_value.isdigit():
seconds = max(int(textbox_value) * 60, 20)
self.countdown(seconds, new=True)
    def countdown(self, remaining, new=False):
        # Each timer tick checks that the label still shows the value this
        # chain wrote last second; if it was changed elsewhere (a new schedule
        # or an abort), this chain silently stops.
        if not new and self.label.text() != '{}:{:02}'.format(*divmod(remaining + 1, 60)):
            return  # updated somewhere else
self.label.setText('{}:{:02}'.format(*divmod(remaining, 60)))
if remaining <= 0:
if self.box.isChecked():
# Process(target=os.system, args=('ping 127.0.0.1 -n 6 > nul & shutdown -h',)).start()
os.popen('ping 127.0.0.1 -n 16 > nul & shutdown -h')
os._exit(0)
else:
os.popen('ping 127.0.0.1 -n 6 > nul & shutdown /s /f /t 30')
os._exit(0)
return
t = threading.Timer(1, self.countdown, (remaining - 1,))
t.start()
def set_tab_2(self):
# Create first tab
frutiger = QFont("Frutiger Next LT CE", 11, QFont.Light)
monaco = QFont("monaco", 11, QFont.Normal)
self.tab2.layout = QVBoxLayout(self)
self.horizontalGroupBox = QGroupBox("Commands")
self.horizontalGroupBox.setFont(frutiger)
self.horizontalGroupBox.setStyleSheet("padding: 15px; border: 1px solid lightgray; border-radius: 0")
cmd = QGridLayout()
cmd.setSpacing(10)
c = "> shutdown /s /f /t 0 " \
"# Shutdown the computer<br/>" \
"> shutdown /s /hybrid " \
"# Shutdown (for fast startup)<br/>" \
"> shutdown /h " \
"# Hibernate the local computer<br/>" \
"> shutdown /r " \
"# Full shutdown and restart<br/>" \
"> shutdown /r /o " \
"# Advanced Boot Options Menu<br/>" \
"> shutdown /a " \
"# Abort a system shutdown"
tx = QTextEdit(c, self)
tx.setAlignment(Qt.AlignLeft)
tx.setFont(monaco)
tx.setStyleSheet("padding: 0; border: 0;")
tx.setReadOnly(True)
tx.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
cmd.addWidget(tx)
self.horizontalGroupBox.setLayout(cmd)
self.tab2.layout.addWidget(self.horizontalGroupBox)
self.horizontalGroupBox = QGroupBox("Copyright")
self.horizontalGroupBox.setFont(frutiger)
self.horizontalGroupBox.setStyleSheet("padding: 20px; border: 1px solid lightgray; border-radius: 0")
cd = QGridLayout()
cd.setSpacing(10)
with open('LICENSE') as f:
c = f.read()
ls = QTextEdit(c.replace('\n\n', '<br/>').replace('Permission', '<br/>Permission'), self)
ls.setAlignment(Qt.AlignLeft)
ls.setFont(monaco)
ls.setStyleSheet("padding: 0; border: 0;")
ls.setReadOnly(True)
ls.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
cd.addWidget(ls)
self.horizontalGroupBox.setLayout(cd)
self.tab2.layout.addWidget(self.horizontalGroupBox)
self.horizontalGroupBox = QGroupBox("About")
self.horizontalGroupBox.setFont(frutiger)
self.horizontalGroupBox.setStyleSheet("padding: 15px; border: 1px solid lightgray; border-radius: 0")
info = QGridLayout()
info.setSpacing(10)
label4 = QLabel('Version: 0.1.0', self)
label4.setAlignment(Qt.AlignCenter)
label4.setFont(monaco)
label4.setStyleSheet("padding: 0; border: 0")
info.addWidget(label4, 0, 0, 1, 1)
label5 = QLabel('GitHub: <a href="https://github.com/bugstop/schedule-shutdown-gui" '
'style="color: black !important; text-decoration: none">bugstop</a>', self)
label5.setAlignment(Qt.AlignCenter)
label5.setFont(monaco)
label5.setStyleSheet("padding: 0; border: 0;")
label5.setOpenExternalLinks(True)
info.addWidget(label5, 0, 1, 1, 1)
self.horizontalGroupBox.setLayout(info)
self.tab2.layout.addWidget(self.horizontalGroupBox)
self.tab2.setLayout(self.tab2.layout)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
ex.table_widget.textbox.setFocus()
sys.exit(app.exec_())
|
common.py
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import json
import yaml
import logging
import os
import re
import subprocess
import stat
import urllib.parse
import threading
import contextlib
import tempfile
import psutil
from functools import reduce, wraps
from decimal import Decimal
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import (
ForwardManyToOneDescriptor,
ManyToManyDescriptor
)
from django.db.models.query import QuerySet
from django.db.models import Q
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.apps import apps
# AWX
from awx.conf.license import get_license
logger = logging.getLogger('awx.main.utils')
__all__ = [
'get_object_or_400', 'camelcase_to_underscore', 'underscore_to_camelcase', 'memoize',
'memoize_delete', 'get_ansible_version', 'get_licenser', 'get_awx_http_client_headers',
'get_awx_version', 'update_scm_url', 'get_type_for_model', 'get_model_for_type',
'copy_model_by_class', 'copy_m2m_relationships',
'prefetch_page_capabilities', 'to_python_boolean', 'ignore_inventory_computed_fields',
'ignore_inventory_group_removal', '_inventory_updates', 'get_pk_from_dict', 'getattrd',
'getattr_dne', 'NoDefaultProvided', 'get_current_apps', 'set_current_apps',
'extract_ansible_vars', 'get_search_fields', 'get_system_task_capacity',
'get_cpu_capacity', 'get_mem_capacity', 'wrap_args_with_proot', 'build_proot_temp_dir',
'check_proot_installed', 'model_to_dict', 'NullablePromptPseudoField',
'model_instance_diff', 'parse_yaml_or_json', 'RequireDebugTrueOrTest',
'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError',
'get_custom_venv_choices', 'get_external_account', 'task_manager_bulk_reschedule',
'schedule_task_manager', 'classproperty', 'create_temporary_fifo', 'truncate_stdout',
'deepmerge'
]
def get_object_or_400(klass, *args, **kwargs):
'''
Return a single object from the given model or queryset based on the query
    params, otherwise raise an exception that will result in a 400 response.
'''
from django.shortcuts import _get_queryset
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist as e:
raise ParseError(*e.args)
except queryset.model.MultipleObjectsReturned as e:
raise ParseError(*e.args)
def to_python_boolean(value, allow_none=False):
value = str(value)
if value.lower() in ('true', '1', 't'):
return True
elif value.lower() in ('false', '0', 'f'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def camelcase_to_underscore(s):
'''
Convert CamelCase names to lowercase_with_underscore.
'''
s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s)
return s.lower().strip('_')
def underscore_to_camelcase(s):
'''
Convert lowercase_with_underscore names to CamelCase.
'''
return ''.join(x.capitalize() or '_' for x in s.split('_'))
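# Round-trip sketch (illustrative values):
#   camelcase_to_underscore('JobTemplate')  -> 'job_template'
#   underscore_to_camelcase('job_template') -> 'JobTemplate'
# Note the pair is not a strict inverse for acronyms:
#   camelcase_to_underscore('HTTPServer') -> 'http_server' -> 'HttpServer'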
class RequireDebugTrueOrTest(logging.Filter):
'''
Logging filter to output when in DEBUG mode or running tests.
'''
def filter(self, record):
from django.conf import settings
return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
pass
def get_memoize_cache():
from django.core.cache import cache
return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
'''
Decorator to wrap a function and cache its result.
'''
if cache_key and track_function:
raise IllegalArgumentError("Can not specify cache_key when track_function is True")
cache = cache or get_memoize_cache()
def memoize_decorator(f):
@wraps(f)
def _memoizer(*args, **kwargs):
if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
key = slugify("%s" % f.__name__)
cache_dict = cache.get(key) or dict()
if cache_dict_key not in cache_dict:
value = f(*args, **kwargs)
cache_dict[cache_dict_key] = value
cache.set(key, cache_dict, ttl)
else:
value = cache_dict[cache_dict_key]
else:
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)
return value
return _memoizer
return memoize_decorator
def memoize_delete(function_name):
cache = get_memoize_cache()
return cache.delete(function_name)
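# Usage sketch (assumes the default Django cache backend is configured):
#   @memoize(ttl=300, track_function=True)
#   def expensive_lookup(pk):
#       ...
# Results are cached for 5 minutes under a per-(args, kwargs) slug; with
# track_function=True all entries live under the function-name key, so
# memoize_delete('expensive_lookup') evicts every cached result at once.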
@memoize()
def get_ansible_version():
'''
    Return the Ansible version installed, as reported by running
    `ansible --version` from the system PATH.
'''
try:
proc = subprocess.Popen(['ansible', '--version'],
stdout=subprocess.PIPE)
result = smart_str(proc.communicate()[0])
return result.split('\n')[0].replace('ansible', '').strip()
except Exception:
return 'unknown'
def get_awx_version():
'''
Return AWX version as reported by setuptools.
'''
from awx import __version__
try:
import pkg_resources
return pkg_resources.require('awx')[0].version
except Exception:
return __version__
def get_awx_http_client_headers():
license = get_license().get('license_type', 'UNLICENSED')
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format(
'AWX' if license == 'open' else 'Red Hat Ansible Tower',
get_awx_version(),
license
)
}
return headers
def get_licenser(*args, **kwargs):
from awx.main.utils.licensing import Licenser, OpenLicense
try:
if os.path.exists('/var/lib/awx/.tower_version'):
return Licenser(*args, **kwargs)
else:
return OpenLicense()
except Exception as e:
raise ValueError(_('Error importing Tower License: %s') % e)
def update_scm_url(scm_type, url, username=True, password=True,
check_special_cases=True, scp_format=False):
'''
Update the given SCM URL to add/replace/remove the username/password. When
username/password is True, preserve existing username/password, when
False (None, '', etc.), remove any existing username/password, otherwise
replace username/password. Also validates the given URL.
'''
# Handle all of the URL formats supported by the SCM systems:
# git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
# svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
if scm_type not in ('git', 'svn', 'insights', 'archive'):
raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
if not url.strip():
return ''
parts = urllib.parse.urlsplit(url)
try:
parts.port
except ValueError:
raise ValueError(_('Invalid %s URL') % scm_type)
if parts.scheme == 'git+ssh' and not scp_format:
raise ValueError(_('Unsupported %s URL') % scm_type)
if '://' not in url:
# Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
if scm_type == 'git' and ':' in url:
if '@' in url:
userpass, hostpath = url.split('@', 1)
else:
userpass, hostpath = '', url
if hostpath.count(':') > 1:
raise ValueError(_('Invalid %s URL') % scm_type)
host, path = hostpath.split(':', 1)
#if not path.startswith('/') and not path.startswith('~/'):
# path = '~/%s' % path
#if path.startswith('/'):
# path = path.lstrip('/')
hostpath = '/'.join([host, path])
modified_url = '@'.join(filter(None, [userpass, hostpath]))
# git+ssh scheme identifies URLs that should be converted back to
# SCP style before passed to git module.
parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
# Handle local paths specified without file scheme (e.g. /path/to/foo).
# Only supported by git.
elif scm_type == 'git':
if not url.startswith('/'):
parts = urllib.parse.urlsplit('file:///%s' % url)
else:
parts = urllib.parse.urlsplit('file://%s' % url)
else:
raise ValueError(_('Invalid %s URL') % scm_type)
# Validate that scheme is valid for given scm_type.
scm_type_schemes = {
'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
'insights': ('http', 'https'),
'archive': ('http', 'https'),
}
if parts.scheme not in scm_type_schemes.get(scm_type, ()):
raise ValueError(_('Unsupported %s URL') % scm_type)
if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
elif parts.scheme != 'file' and not parts.netloc:
raise ValueError(_('Host is required for %s URL') % parts.scheme)
if username is True:
netloc_username = parts.username or ''
elif username:
netloc_username = username
else:
netloc_username = ''
if password is True:
netloc_password = parts.password or ''
elif password:
netloc_password = password
else:
netloc_password = ''
# Special handling for github/bitbucket SSH URLs.
if check_special_cases:
special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
#raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
netloc_password = ''
if netloc_username and parts.scheme != 'file' and scm_type not in ("insights", "archive"):
netloc = u':'.join([urllib.parse.quote(x,safe='') for x in (netloc_username, netloc_password) if x])
else:
netloc = u''
netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
if parts.port:
netloc = u':'.join([netloc, str(parts.port)])
new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path,
parts.query, parts.fragment])
if scp_format and parts.scheme == 'git+ssh':
new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
return new_url
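# Example transformations (illustrative inputs):
#   update_scm_url('git', 'git@github.com:ansible/awx.git')
#       -> 'git+ssh://git@github.com/ansible/awx.git'
#   update_scm_url('git', 'git@github.com:ansible/awx.git', scp_format=True)
#       -> 'git@github.com:ansible/awx.git'
#   update_scm_url('git', 'https://user:pass@example.com/repo.git', password=False)
#       -> 'https://user@example.com/repo.git'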
def get_allowed_fields(obj, serializer_mapping):
if serializer_mapping is not None and obj.__class__ in serializer_mapping:
serializer_actual = serializer_mapping[obj.__class__]()
allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id']
else:
allowed_fields = [x.name for x in obj._meta.fields]
ACTIVITY_STREAM_FIELD_EXCLUSIONS = {
'user': ['last_login'],
'oauth2accesstoken': ['last_used'],
'oauth2application': ['client_secret']
}
model_name = obj._meta.model_name
fields_excluded = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(model_name, [])
# see definition of from_db for CredentialType
# injection logic of any managed types are incompatible with activity stream
if model_name == 'credentialtype' and obj.managed_by_tower and obj.namespace:
fields_excluded.extend(['inputs', 'injectors'])
if fields_excluded:
allowed_fields = [f for f in allowed_fields if f not in fields_excluded]
return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
# NOTE: Careful modifying the value of field_val, as it could modify
# underlying model object field value also.
try:
field_val = getattr(obj, field_name, None)
except ObjectDoesNotExist:
return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
if password_fields is None:
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
if field_name in password_fields or (
isinstance(field_val, str) and
field_val.startswith('$encrypted$')
):
return u'hidden'
if hasattr(obj, 'display_%s' % field_name):
field_val = getattr(obj, 'display_%s' % field_name)()
if isinstance(field_val, (list, dict)):
try:
field_val = json.dumps(field_val, ensure_ascii=False)
except Exception:
pass
if type(field_val) not in (bool, int, type(None)):
field_val = smart_str(field_val)
return field_val
def model_instance_diff(old, new, serializer_mapping=None):
"""
Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
created model or deleted model). This will cause all fields with a value to have changed (from None).
serializer_mapping are used to determine read-only fields.
When provided, read-only fields will not be included in the resulting dictionary
"""
from django.db.models import Model
if not(old is None or isinstance(old, Model)):
raise TypeError('The supplied old instance is not a valid model instance.')
if not(new is None or isinstance(new, Model)):
raise TypeError('The supplied new instance is not a valid model instance.')
old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])
diff = {}
allowed_fields = get_allowed_fields(new, serializer_mapping)
for field in allowed_fields:
old_value = getattr(old, field, None)
new_value = getattr(new, field, None)
if old_value != new_value:
diff[field] = (
_convert_model_field_for_display(old, field, password_fields=old_password_fields),
_convert_model_field_for_display(new, field, password_fields=new_password_fields),
)
if len(diff) == 0:
diff = None
return diff
def model_to_dict(obj, serializer_mapping=None):
"""
Serialize a model instance to a dictionary as best as possible
serializer_mapping are used to determine read-only fields.
When provided, read-only fields will not be included in the resulting dictionary
"""
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
attr_d = {}
allowed_fields = get_allowed_fields(obj, serializer_mapping)
for field_name in allowed_fields:
attr_d[field_name] = _convert_model_field_for_display(obj, field_name, password_fields=password_fields)
return attr_d
class CharPromptDescriptor:
"""Class used for identifying nullable launch config fields from class
ex. Schedule.limit
"""
def __init__(self, field):
self.field = field
class NullablePromptPseudoField:
"""
Interface for pseudo-property stored in `char_prompts` dict
Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
"""
def __init__(self, field_name):
self.field_name = field_name
@cached_property
def field_descriptor(self):
return CharPromptDescriptor(self)
def __get__(self, instance, type=None):
if instance is None:
# for inspection on class itself
return self.field_descriptor
return instance.char_prompts.get(self.field_name, None)
def __set__(self, instance, value):
if value in (None, {}):
instance.char_prompts.pop(self.field_name, None)
else:
instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
'''
    Create a new unsaved object of type Class2 using the fields from obj1;
    values in kwargs override the corresponding values from obj1.
'''
create_kwargs = {}
for field_name in fields:
descriptor = getattr(Class2, field_name)
if isinstance(descriptor, ForwardManyToOneDescriptor): # ForeignKey
# Foreign keys can be specified as field_name or field_name_id.
id_field_name = '%s_id' % field_name
if field_name in kwargs:
value = kwargs[field_name]
elif id_field_name in kwargs:
value = kwargs[id_field_name]
else:
value = getattr(obj1, id_field_name)
if hasattr(value, 'id'):
value = value.id
create_kwargs[id_field_name] = value
elif isinstance(descriptor, CharPromptDescriptor):
# difficult case of copying one launch config to another launch config
new_val = None
if field_name in kwargs:
new_val = kwargs[field_name]
elif hasattr(obj1, 'char_prompts'):
if field_name in obj1.char_prompts:
new_val = obj1.char_prompts[field_name]
elif hasattr(obj1, field_name):
# extremely rare case where a template spawns a launch config - sliced jobs
new_val = getattr(obj1, field_name)
if new_val is not None:
create_kwargs.setdefault('char_prompts', {})
create_kwargs['char_prompts'][field_name] = new_val
elif isinstance(descriptor, ManyToManyDescriptor):
continue # not copied in this method
elif field_name in kwargs:
if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(obj1, field_name):
create_kwargs[field_name] = getattr(obj1, field_name)
# Apply class-specific extra processing for origination of unified jobs
if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
else:
new_kwargs = create_kwargs
return Class2(**new_kwargs)
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
'''
In-place operation.
    Given two saved objects, copy related objects from obj1 to the
    same-named field on obj2, for each field listed in `fields`.
'''
for field_name in fields:
if hasattr(obj1, field_name):
try:
field_obj = obj1._meta.get_field(field_name)
except FieldDoesNotExist:
continue
if isinstance(field_obj, ManyToManyField):
# Many to Many can be specified as field_name
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager':
src_field_value = override_field_val
dest_field = getattr(obj2, field_name)
dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
def get_type_for_model(model):
'''
Return type name for a given model class.
'''
opts = model._meta.concrete_model._meta
return camelcase_to_underscore(opts.object_name)
def get_model_for_type(type_name):
'''
Return model class for a given type name.
'''
model_str = underscore_to_camelcase(type_name)
if model_str == 'User':
use_app = 'auth'
else:
use_app = 'main'
return apps.get_model(use_app, model_str)
def prefetch_page_capabilities(model, page, prefetch_list, user):
'''
Given a `page` list of objects, a nested dictionary of user_capabilities
are returned by id, ex.
{
4: {'edit': True, 'start': True},
6: {'edit': False, 'start': False}
}
Each capability is produced for all items in the page in a single query
Examples of prefetch language:
prefetch_list = ['admin', 'execute']
--> prefetch the admin (edit) and execute (start) permissions for
items in list for current user
prefetch_list = ['inventory.admin']
--> prefetch the related inventory FK permissions for current user,
and put it into the object's cache
prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
--> prefetch logical combination of admin permission to inventory AND
project, put into cache dictionary as "copy"
'''
page_ids = [obj.id for obj in page]
mapping = {}
for obj in page:
mapping[obj.id] = {}
for prefetch_entry in prefetch_list:
display_method = None
if type(prefetch_entry) is dict:
display_method = list(prefetch_entry.keys())[0]
paths = prefetch_entry[display_method]
else:
paths = prefetch_entry
if type(paths) is not list:
paths = [paths]
# Build the query for accessible_objects according the user & role(s)
filter_args = []
for role_path in paths:
if '.' in role_path:
res_path = '__'.join(role_path.split('.')[:-1])
role_type = role_path.split('.')[-1]
parent_model = model
for subpath in role_path.split('.')[:-1]:
parent_model = parent_model._meta.get_field(subpath).related_model
filter_args.append(Q(
Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) |
Q(**{'%s__isnull' % res_path: True})))
else:
role_type = role_path
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
if display_method is None:
# Role name translation to UI names for methods
display_method = role_type
if role_type == 'admin':
display_method = 'edit'
elif role_type in ['execute', 'update']:
display_method = 'start'
# Union that query with the list of items on page
filter_args.append(Q(pk__in=page_ids))
ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))
# Save data item-by-item
for obj in page:
mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
return mapping
def validate_vars_type(vars_obj):
if not isinstance(vars_obj, dict):
vars_type = type(vars_obj)
if hasattr(vars_type, '__name__'):
data_type = vars_type.__name__
else:
data_type = str(vars_type)
raise AssertionError(
_('Input type `{data_type}` is not a dictionary').format(
data_type=data_type)
)
def parse_yaml_or_json(vars_str, silent_failure=True):
'''
Attempt to parse a string of variables.
First, with JSON parser, if that fails, then with PyYAML.
    If both attempts fail, return an empty dictionary if `silent_failure`
    is True, or re-raise a combined error if `silent_failure` is False.
'''
if isinstance(vars_str, dict):
return vars_str
elif isinstance(vars_str, str) and vars_str == '""':
return {}
try:
vars_dict = json.loads(vars_str)
validate_vars_type(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err:
try:
vars_dict = yaml.safe_load(vars_str)
# Can be None if '---'
if vars_dict is None:
vars_dict = {}
validate_vars_type(vars_dict)
if not silent_failure:
# is valid YAML, check that it is compatible with JSON
try:
json.dumps(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err2:
raise ParseError(_(
'Variables not compatible with JSON standard (error: {json_error})').format(
json_error=str(json_err2)))
except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
if silent_failure:
return {}
raise ParseError(_(
'Cannot parse as JSON (error: {json_error}) or '
'YAML (error: {yaml_error}).').format(
json_error=str(json_err), yaml_error=str(yaml_err)))
return vars_dict
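# Behavior sketch:
#   parse_yaml_or_json('{"a": 1}')  -> {'a': 1}   (JSON parses first)
#   parse_yaml_or_json('a: 1')      -> {'a': 1}   (falls back to YAML)
#   parse_yaml_or_json('[1, 2')     -> {}         (unparseable; silent_failure=True)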
def get_cpu_capacity():
from django.conf import settings
settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)
if env_abscpu is not None:
return 0, int(env_abscpu)
elif settings_abscpu is not None:
return 0, int(settings_abscpu)
cpu = psutil.cpu_count()
if env_forkcpu:
forkcpu = int(env_forkcpu)
elif settings_forkcpu:
forkcpu = int(settings_forkcpu)
else:
forkcpu = 4
return (cpu, cpu * forkcpu)
def get_mem_capacity():
from django.conf import settings
settings_forkmem = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
env_forkmem = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)
if env_absmem is not None:
return 0, int(env_absmem)
elif settings_absmem is not None:
return 0, int(settings_absmem)
if env_forkmem:
forkmem = int(env_forkmem)
elif settings_forkmem:
forkmem = int(settings_forkmem)
else:
forkmem = 100
mem = psutil.virtual_memory().total
return (mem, max(1, ((mem // 1024 // 1024) - 2048) // forkmem))
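# Worked example (assuming no env/settings overrides): on a host with 8 GiB RAM
# and the default forkmem of 100, get_mem_capacity() returns
# (8589934592, max(1, (8192 - 2048) // 100)) == (8589934592, 61):
# 2 GiB is reserved for the system and each fork is budgeted ~100 MB.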
def get_system_task_capacity(scale=Decimal(1.0), cpu_capacity=None, mem_capacity=None):
'''
Measure system memory and use it as a baseline for determining the system's capacity
'''
from django.conf import settings
settings_forks = getattr(settings, 'SYSTEM_TASK_FORKS_CAPACITY', None)
env_forks = os.getenv('SYSTEM_TASK_FORKS_CAPACITY', None)
if env_forks:
return int(env_forks)
elif settings_forks:
return int(settings_forks)
if cpu_capacity is None:
_, cpu_cap = get_cpu_capacity()
else:
cpu_cap = cpu_capacity
if mem_capacity is None:
_, mem_cap = get_mem_capacity()
else:
mem_cap = mem_capacity
return min(mem_cap, cpu_cap) + ((max(mem_cap, cpu_cap) - min(mem_cap, cpu_cap)) * scale)
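# Worked example: with cpu_cap=16 and mem_cap=61, scale=Decimal(1.0) gives
# 16 + (61 - 16) * 1 == 61 (the larger bound); scale=Decimal(0.5) lands
# halfway between the two bounds at 38.5.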
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
'''
Context manager to ignore updating inventory computed fields.
'''
try:
previous_value = getattr(_inventory_updates, 'is_updating', False)
_inventory_updates.is_updating = True
yield
finally:
_inventory_updates.is_updating = previous_value
def _schedule_task_manager():
from awx.main.scheduler.tasks import run_task_manager
from django.db import connection
# runs right away if not in transaction
connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
"""Context manager to avoid submitting task multiple times.
"""
try:
previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
previous_value = getattr(_task_manager, 'needs_scheduling', False)
_task_manager.bulk_reschedule = True
_task_manager.needs_scheduling = False
yield
finally:
_task_manager.bulk_reschedule = previous_flag
if _task_manager.needs_scheduling:
_schedule_task_manager()
_task_manager.needs_scheduling = previous_value
def schedule_task_manager():
if getattr(_task_manager, 'bulk_reschedule', False):
_task_manager.needs_scheduling = True
return
_schedule_task_manager()
@contextlib.contextmanager
def ignore_inventory_group_removal():
'''
Context manager to ignore moving groups/hosts when group is deleted.
'''
try:
previous_value = getattr(_inventory_updates, 'is_removing', False)
_inventory_updates.is_removing = True
yield
finally:
_inventory_updates.is_removing = previous_value
@contextlib.contextmanager
def set_environ(**environ):
'''
Temporarily set the process environment variables.
>>> with set_environ(FOO='BAR'):
... assert os.environ['FOO'] == 'BAR'
'''
old_environ = os.environ.copy()
try:
os.environ.update(environ)
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
@memoize()
def check_proot_installed():
'''
Check that proot is installed.
'''
from django.conf import settings
cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
logger.exception('bwrap unavailable for unexpected reason.')
return False
def build_proot_temp_dir():
'''
Create a temporary directory for proot to use.
'''
from django.conf import settings
path = tempfile.mkdtemp(prefix='awx_proot_', dir=settings.AWX_PROOT_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return path
def wrap_args_with_proot(args, cwd, **kwargs):
'''
Wrap existing command line with proot to restrict access to:
- AWX_PROOT_BASE_PATH (generally, /tmp) (except for own /tmp files)
For non-isolated nodes:
- /etc/tower (to prevent obtaining db info or secret key)
- /var/lib/awx (except for current project)
- /var/log/tower
- /var/log/supervisor
'''
from django.conf import settings
cwd = os.path.realpath(cwd)
new_args = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']
hide_paths = [settings.AWX_PROOT_BASE_PATH]
if not kwargs.get('isolated'):
hide_paths.extend(['/etc/tower', '/var/lib/awx', '/var/log', '/etc/ssh',
settings.PROJECTS_ROOT, settings.JOBOUTPUT_ROOT])
hide_paths.extend(getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or [])
for path in sorted(set(hide_paths)):
if not os.path.exists(path):
continue
path = os.path.realpath(path)
if os.path.isdir(path):
new_path = tempfile.mkdtemp(dir=kwargs['proot_temp_dir'])
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
else:
handle, new_path = tempfile.mkstemp(dir=kwargs['proot_temp_dir'])
os.close(handle)
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
new_args.extend(['--bind', '%s' %(new_path,), '%s' % (path,)])
if kwargs.get('isolated'):
show_paths = [kwargs['private_data_dir']]
elif 'private_data_dir' in kwargs:
show_paths = [cwd, kwargs['private_data_dir']]
else:
show_paths = [cwd]
for venv in (
settings.ANSIBLE_VENV_PATH,
settings.AWX_VENV_PATH,
kwargs.get('proot_custom_virtualenv')
):
if venv:
new_args.extend(['--ro-bind', venv, venv])
show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or [])
show_paths.extend(kwargs.get('proot_show_paths', []))
for path in sorted(set(show_paths)):
if not os.path.exists(path):
continue
path = os.path.realpath(path)
new_args.extend(['--bind', '%s' % (path,), '%s' % (path,)])
if kwargs.get('isolated'):
if '/bin/ansible-playbook' in ' '.join(args):
# playbook runs should cwd to the SCM checkout dir
new_args.extend(['--chdir', os.path.join(kwargs['private_data_dir'], 'project')])
else:
# ad-hoc runs should cwd to the root of the private data dir
new_args.extend(['--chdir', kwargs['private_data_dir']])
else:
new_args.extend(['--chdir', cwd])
new_args.extend(args)
return new_args
def get_pk_from_dict(_dict, key):
'''
Helper for obtaining a pk from user data dict or None if not present.
'''
try:
val = _dict[key]
        if hasattr(val, 'id'):
return val.id # return id if given model object
return int(val)
except (TypeError, KeyError, ValueError):
return None
class NoDefaultProvided(object):
pass
def getattrd(obj, name, default=NoDefaultProvided):
"""
Same as getattr(), but allows dot notation lookup
Discussed in:
http://stackoverflow.com/questions/11975781
"""
try:
return reduce(getattr, name.split("."), obj)
except AttributeError:
if default != NoDefaultProvided:
return default
raise
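# Example (illustrative model names): getattrd(job, 'project.organization.name',
# default=None) walks the dotted path one attribute at a time and returns None
# instead of raising AttributeError when any hop is missing.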
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
try:
return getattr(obj, name)
except notfound:
return None
current_apps = apps
def set_current_apps(apps):
global current_apps
current_apps = apps
def get_current_apps():
global current_apps
return current_apps
def get_custom_venv_choices(custom_paths=None):
from django.conf import settings
custom_paths = custom_paths or settings.CUSTOM_VENV_PATHS
all_venv_paths = [settings.BASE_VENV_PATH] + custom_paths
custom_venv_choices = []
for custom_venv_path in all_venv_paths:
try:
if os.path.exists(custom_venv_path):
custom_venv_choices.extend([
os.path.join(custom_venv_path, x, '')
for x in os.listdir(custom_venv_path)
if x != 'awx' and
os.path.isdir(os.path.join(custom_venv_path, x)) and
os.path.exists(os.path.join(custom_venv_path, x, 'bin', 'activate'))
])
except Exception:
logger.exception("Encountered an error while discovering custom virtual environments.")
return custom_venv_choices
def is_ansible_variable(key):
return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
extra_vars = parse_yaml_or_json(extra_vars)
ansible_vars = set([])
for key in list(extra_vars.keys()):
if is_ansible_variable(key):
extra_vars.pop(key)
ansible_vars.add(key)
return (extra_vars, ansible_vars)
def get_search_fields(model):
fields = []
for field in model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email',
'name', 'description'):
fields.append(field.name)
return fields
def has_model_field_prefetched(model_obj, field_name):
# NOTE: Update this function if django internal implementation changes.
return getattr(getattr(model_obj, field_name, None),
'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
from django.conf import settings
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and user.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
account_type = "enterprise"
return account_type
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
def create_temporary_fifo(data):
"""Open fifo named pipe in a new thread using a temporary file path. The
thread blocks until data is read from the pipe.
Returns the path to the fifo.
:param data(bytes): Data to write to the pipe.
"""
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(
target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)
).start()
return path
def truncate_stdout(stdout, size):
from awx.main.constants import ANSI_SGR_PATTERN
if size <= 0 or len(stdout) <= size:
return stdout
stdout = stdout[:(size - 1)] + u'\u2026'
set_count, reset_count = 0, 0
for m in ANSI_SGR_PATTERN.finditer(stdout):
if m.group() == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
return stdout + u'\u001b[0m' * (set_count - reset_count)
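# Example (illustrative; ANSI_SGR_PATTERN comes from awx.main.constants):
# truncating colored output keeps escape sequences balanced:
#   truncate_stdout(u'\u001b[31mred text\u001b[0m', 12)
#       -> u'\u001b[31mred te\u2026\u001b[0m'
# The one unmatched SGR "set" sequence gets one closing reset appended.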
def deepmerge(a, b):
"""
Merge dict structures and return the result.
>>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
>>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
>>> import pprint; pprint.pprint(deepmerge(a, b))
{'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
"""
if isinstance(a, dict) and isinstance(b, dict):
return dict([(k, deepmerge(a.get(k), b.get(k)))
for k in set(a.keys()).union(b.keys())])
elif b is None:
return a
else:
return b
|
mk_rxc.py
|
# IO options for loading pathway data.
# Zilin Song, 20 AUG 2021
#
import iomisc, numpy, multiprocessing, dist_compute
def extract_rxc(mda_universe, mda_universe_label, pathname, nrep=50, ):
    '''Extract all reaction coordinates in one RPM coordinate of nrep replicas.
'''
rep_distlabel = [] # Labels
path_rxc_distmat = []
path_rxc_labelmat = []
for repid in range(1, nrep+1):
rep_distlabel.append('{0}.rep{1}'.format(mda_universe_label, str(repid)))
path_rxc_distrow, path_rxc_labelrow = dist_compute.dist_rx(mda_universe, repid, pathname, )
path_rxc_distmat.append(path_rxc_distrow)
path_rxc_labelmat.append(path_rxc_labelrow)
return path_rxc_distmat, path_rxc_labelmat
def check_label(labelmat, ):
'''Check for consistent labeling.
'''
for i in range(len(labelmat)):
for j in range(len(labelmat[i])):
if labelmat[0][j] != labelmat[i][j]:
                print('Unequal dist label detected: {0} vs {1} @ f{2}r{3}\n'.format(labelmat[0][j], labelmat[i][j], str(i), str(j), ))
exit()
def process_conf(sysname, pathname, ):
'''Process all conformations.
'''
rxc_distmat = []
rxc_distlabel = []
    for pid in range(1, 101):
u = iomisc.load_path(sysname, pathname, 'fw', pid, )
u_label = '{0}.{1}.{2}.path{3}'.format(sysname, pathname, 'fw', str(pid), )
print(u_label, flush=True, )
path_distmat, path_distlabel = extract_rxc(u, u_label, pathname, )
rxc_distmat += path_distmat
rxc_distlabel += path_distlabel
    for pid in range(1, 101):
u = iomisc.load_path(sysname, pathname, 'bw', pid, )
u_label = '{0}.{1}.{2}.path{3}'.format(sysname, pathname, 'bw', str(pid), )
print(u_label, flush=True, )
path_distmat, path_distlabel = extract_rxc(u, u_label, pathname, )
rxc_distmat += path_distmat
rxc_distlabel += path_distlabel
rxc_distmat = numpy.asarray(rxc_distmat)
print('Finished dist extraction: rxc_distmat.shape = {0}.\n Checking labels...'.format(rxc_distmat.shape))
check_label(rxc_distlabel)
print('Labels_checked.\nDONE.')
numpy.save('./rawds_rxc/{0}.{1}.rxc_distmat.npy'.format(sysname, pathname.replace('-', '')), rxc_distmat, )
numpy.save('./rawds_rxc/{0}.{1}.rxc_distlabel.npy'.format(sysname, pathname.replace('-', '')), rxc_distlabel[0], )
def main():
jobs = []
p0 = multiprocessing.Process(target=process_conf, args=('toho_amp', 'r1ae'))
p1 = multiprocessing.Process(target=process_conf, args=('toho_amp', 'r2ae'))
p2 = multiprocessing.Process(target=process_conf, args=('toho_cex', 'r1ae'))
p3 = multiprocessing.Process(target=process_conf, args=('toho_cex', 'r2ae'))
jobs.append(p0)
jobs.append(p1)
jobs.append(p2)
jobs.append(p3)
    for p in jobs:
        p.start()
    for p in jobs:
        p.join()  # wait for all four pathway extractions to finish
if __name__ == "__main__":
main()
|
DataGen_Transformer_mapRanking.py
|
import csv
import os
import sys
import shutil
import time
import numpy as np
import scipy.io as sio
import yaml
import argparse
from easydict import EasyDict
from os.path import dirname, realpath, pardir
from hashids import Hashids
import hashlib
sys.path.append(os.path.join(dirname(realpath(__file__)), pardir))
import utils.graphUtils.graphTools as graph
# from utils.graphUtils.graphTools import isConnected
from dataloader.statetransformer import AgentState
# from dataloader.statetransformer_localGuidance import AgentState
from scipy.spatial.distance import squareform, pdist
from multiprocessing import Queue, Process
from queue import Empty  # raised by multiprocessing.Queue.get(block=False)
parser = argparse.ArgumentParser("Input width and #Agent")
parser.add_argument('--num_agents', type=int, default=4)
parser.add_argument('--map_w', type=int, default=10)
parser.add_argument('--map_density', type=float, default=0.1)
parser.add_argument('--dir_SaveData', type=str, default='../MultiAgentDataset/DataSource_DMap_LG')
parser.add_argument('--loadmap_TYPE', type=str, default='map')
parser.add_argument('--id_env', type=int, default=None)
parser.add_argument('--solCases_dir', type=str, default='../MultiAgentDataset/Solution_DMap')
parser.add_argument('--chosen_solver', type=str, default='ECBS')
parser.add_argument('--id_start', type=int, default=0)
parser.add_argument('--div_train', type=int, default=21000)
parser.add_argument('--div_valid', type=int, default=200)
parser.add_argument('--div_test', type=int, default=4500)
parser.add_argument('--FOV', type=int, default=9)
zeroTolerance = 1e-9
args = parser.parse_args()
class DataTransformer:
def __init__(self, config):
self.config = config
self.PROCESS_NUMBER = 4
self.num_agents = self.config.num_agents
self.size_map = [self.config.map_w, self.config.map_w]
self.label_density = str(self.config.map_density).split('.')[-1]
self.AgentState = AgentState(self.config)
self.communicationRadius = 5 # communicationRadius
self.zeroTolerance = 1e-9
self.delta = [[-1, 0], # go up
[0, -1], # go left
[1, 0], # go down
[0, 1], # go right
[0, 0]] # stop
self.num_actions = 5
self.list_seqtrain_file = []
self.list_train_file = []
self.list_seqvalid_file = []
self.list_validStep_file = []
self.list_valid_file = []
self.list_test_file = []
        self.hashids = Hashids(alphabet='0123456789abcdef', min_length=5)  # fixed duplicated '7' in alphabet
self.pathtransformer = self.pathtransformer_RelativeCoordinate
        if self.config.id_env is None:
self.label_setup = '{}{:02d}x{:02d}_density_p{}/{}_Agent'.format(self.config.loadmap_TYPE,
self.size_map[0],
self.size_map[1],
self.label_density,
self.num_agents)
else:
self.label_setup = '{}{:02d}x{:02d}_density_p{}/IDMap{:05d}/{}_Agent'.format(self.config.loadmap_TYPE, self.size_map[0],self.size_map[1],
self.label_density,self.config.id_env,
self.num_agents)
self.dirName_parent = os.path.join(self.config.solCases_dir, self.label_setup)
self.dirName_Store = os.path.join(self.config.dir_SaveData, self.label_setup)
self.dirName_input = os.path.join(self.dirName_parent, 'input')
self.dirName_output = os.path.join(self.dirName_parent, 'output_{}'.format(config.chosen_solver))
self.set_up()
def set_up(self):
self.list_failureCases_solution = self.search_failureCases(self.dirName_output)
self.list_failureCases_input = self.search_failureCases(self.dirName_input)
self.nameprefix_input = self.list_failureCases_input[0].split('input/')[-1].split('ID')[0]
self.list_failureCases_solution = sorted(self.list_failureCases_solution)
self.len_failureCases_solution = len(self.list_failureCases_solution)
def reset(self):
self.task_queue = Queue()
dirpath = self.dirName_Store
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
self.path_save_solDATA = self.dirName_Store
try:
# Create target Directory
os.makedirs(self.path_save_solDATA)
os.makedirs(os.path.join(self.path_save_solDATA, 'train'))
os.makedirs(os.path.join(self.path_save_solDATA, 'valid'))
os.makedirs(os.path.join(self.path_save_solDATA, 'test'))
except FileExistsError:
# print("Directory ", dirName, " already exists")
pass
def solutionTransformer(self):
# div_train = 21000
# div_valid = 61
# div_test = 4500
# div_train = 0
# div_valid = 0
# div_test = 1500
div_train = self.config.div_train
div_valid = self.config.div_valid
div_test = self.config.div_test
# div_train = 5
# div_valid = 2
# div_test = 2
num_used_data = div_train + div_valid + div_test
num_data_loop = min(num_used_data, self.len_failureCases_solution)
# for id_sol in range(num_data_loop):
for id_sol in range(self.config.id_start, num_data_loop):
if id_sol < div_train:
mode = "train"
case_config = (mode, id_sol)
self.task_queue.put(case_config)
elif id_sol < (div_train+div_valid):
mode = "valid"
case_config = (mode, id_sol)
self.task_queue.put(case_config)
elif id_sol <= num_used_data:
mode = "test"
case_config = (mode, id_sol)
self.task_queue.put(case_config)
time.sleep(0.3)
processes = []
for i in range(self.PROCESS_NUMBER):
# Run Multiprocesses
            p = Process(target=self.compute_thread, args=(str(i),))  # trailing comma: args must be a tuple
processes.append(p)
[x.start() for x in processes]
def compute_thread(self,thread_id):
while True:
try:
case_config = self.task_queue.get(block=False)
(mode, id_sol) = case_config
print('thread {} get task:{} - {}'.format(thread_id, mode, id_sol))
self.pipeline(case_config)
            except Empty:
                # queue drained: no more tasks for this worker, exit
                return
def pipeline(self, case_config):
(mode, id_sol) = case_config
agents_schedule, agents_goal, makespan, map_data, id_case = self.load_ExpertSolution(id_sol)
# agents_schedule, agents_goal, makespan, map_data, id_case = self.load_ExpertSolution_(id_sol)
log_str = 'Transform_failureCases_ID_#{} in MAP_ID{}'.format(id_case[1],id_case[0])
print('############## {} ###############'.format(log_str))
# print(agents_schedule)
if mode == "train" or mode == "valid":
self.pathtransformer(map_data, agents_schedule, agents_goal, makespan + 1, id_case, mode)
else:
self.pathtransformer_test(map_data, agents_schedule, agents_goal, makespan + 1, id_case, mode)
def load_ExpertSolution(self, ID_case):
name_solution_file = self.list_failureCases_solution[ID_case]
# id_solved_case = name_solution_file.split('_ID')[-1].split('.yaml')[0]
map_setup = name_solution_file.split('output_')[-1].split('_IDMap')[0]
id_sol_map = name_solution_file.split('_IDMap')[-1].split('_IDCase')[0]
id_sol_case = name_solution_file.split('_IDCase')[-1].split('_')[0]
name_inputfile = os.path.join(self.dirName_input,
'input_{}_IDMap{}_IDCase{}.yaml'.format(map_setup, id_sol_map, id_sol_case))
# print(name_inputfile)
# print(name_solution_file)
with open(name_inputfile, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
with open(name_solution_file, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_output = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
agentsConfig = data_config['agents']
num_agent = len(agentsConfig)
list_posObstacle = data_config['map']['obstacles']
        if list_posObstacle is None:
map_data = np.zeros(self.size_map, dtype=np.int64)
else:
map_data = self.setup_map(list_posObstacle)
schedule = data_output['schedule']
makespan = data_output['statistics']['makespan']
goal_allagents = np.zeros([num_agent, 2])
schedule_agentsState = np.zeros([makespan + 1, num_agent, 2])
schedule_agentsActions = np.zeros([makespan + 1, num_agent, self.num_actions])
schedule_agents = [schedule_agentsState, schedule_agentsActions]
hash_ids = np.zeros(self.num_agents)
for id_agent in range(num_agent):
goalX = agentsConfig[id_agent]['goal'][0]
goalY = agentsConfig[id_agent]['goal'][1]
goal_allagents[id_agent][:] = [goalX, goalY]
schedule_agents = self.obtainSchedule(id_agent, schedule, schedule_agents, goal_allagents, makespan + 1)
str_id = '{}_{}_{}'.format(id_sol_map,id_sol_case,id_agent)
int_id = int(hashlib.sha256(str_id.encode('utf-8')).hexdigest(), 16) % (10 ** 5)
# hash_ids[id_agent]=np.divide(int_id,10**5)
hash_ids[id_agent] = int_id
# print(id_sol_map, id_sol_case, hash_ids)
return schedule_agents, goal_allagents, makespan, map_data, (id_sol_map, id_sol_case, hash_ids)
def load_ExpertSolution_(self, ID_case):
name_solution_file = self.list_failureCases_solution[ID_case]
id_sol_case = name_solution_file.split('_ID')[-1].split('.yaml')[0]
map_setup = 'demo'
id_sol_map = '0'
name_inputfile = os.path.join(self.dirName_input,
'failureCases_ID{}.yaml'.format(id_sol_case))
# print(name_inputfile)
# print(name_solution_file)
with open(name_inputfile, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
with open(name_solution_file, 'r') as stream:
try:
# print(yaml.safe_load(stream))
data_output = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
agentsConfig = data_config['agents']
num_agent = len(agentsConfig)
list_posObstacle = data_config['map']['obstacles']
        if list_posObstacle is None:
map_data = np.zeros(self.size_map, dtype=np.int64)
else:
map_data = self.setup_map(list_posObstacle)
schedule = data_output['schedule']
makespan = data_output['statistics']['makespan']
# print(schedule)
goal_allagents = np.zeros([num_agent, 2])
schedule_agentsState = np.zeros([makespan + 1, num_agent, 2])
schedule_agentsActions = np.zeros([makespan + 1, num_agent, self.num_actions])
schedule_agents = [schedule_agentsState, schedule_agentsActions]
hash_ids = np.zeros(self.num_agents)
for id_agent in range(num_agent):
goalX = agentsConfig[id_agent]['goal'][0]
goalY = agentsConfig[id_agent]['goal'][1]
goal_allagents[id_agent][:] = [goalX, goalY]
schedule_agents = self.obtainSchedule(id_agent, schedule, schedule_agents, goal_allagents, makespan + 1)
str_id = '{}_{}_{}'.format(id_sol_map, id_sol_case, id_agent)
int_id = int(hashlib.sha256(str_id.encode('utf-8')).hexdigest(), 16) % (10 ** 5)
# hash_ids[id_agent]=np.divide(int_id,10**5)
hash_ids[id_agent] = int_id
print(schedule_agents)
# print(id_sol_map, id_sol_case, hash_ids)
return schedule_agents, goal_allagents, makespan, map_data, (id_sol_map, id_sol_case, hash_ids)
def obtainSchedule(self, id_agent, agentplan, schedule_agents, goal_allagents, teamMakeSpan):
name_agent = "agent{}".format(id_agent)
[schedule_agentsState, schedule_agentsActions] = schedule_agents
planCurrentAgent = agentplan[name_agent]
pathLengthCurrentAgent = len(planCurrentAgent)
actionKeyListAgent = []
for step in range(teamMakeSpan):
if step < pathLengthCurrentAgent:
currentX = planCurrentAgent[step]['x']
currentY = planCurrentAgent[step]['y']
else:
currentX = goal_allagents[id_agent][0]
currentY = goal_allagents[id_agent][1]
schedule_agentsState[step][id_agent][:] = [currentX, currentY]
# up left down right stop
actionVectorTarget = [0, 0, 0, 0, 0]
# map action with respect to the change of position of agent
if step < (pathLengthCurrentAgent - 1):
nextX = planCurrentAgent[step + 1]['x']
nextY = planCurrentAgent[step + 1]['y']
# actionCurrent = [nextX - currentX, nextY - currentY]
elif step >= (pathLengthCurrentAgent - 1):
nextX = goal_allagents[id_agent][0]
nextY = goal_allagents[id_agent][1]
actionCurrent = [nextX - currentX, nextY - currentY]
actionKeyIndex = self.delta.index(actionCurrent)
actionKeyListAgent.append(actionKeyIndex)
actionVectorTarget[actionKeyIndex] = 1
schedule_agentsActions[step][id_agent][:] = actionVectorTarget
return [schedule_agentsState,schedule_agentsActions]
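    # Action encoding sketch: a move of [1, 0] (down) is looked up in self.delta,
    # giving actionKeyIndex == 2, which one-hot encodes to [0, 0, 1, 0, 0];
    # staying put ([0, 0]) maps to index 4, i.e. [0, 0, 0, 0, 1].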
def setup_map(self, list_posObstacle):
num_obstacle = len(list_posObstacle)
map_data = np.zeros(self.size_map)
for ID_obs in range(num_obstacle):
obstacleIndexX = list_posObstacle[ID_obs][0]
obstacleIndexY = list_posObstacle[ID_obs][1]
map_data[obstacleIndexX][obstacleIndexY] = 1
return map_data
def pathtransformer_RelativeCoordinate(self, map_data, agents_schedule, agents_goal, makespan, ID_case, mode):
# input: start and goal position,
# output: a set of file,
# each file consist of state (map. goal, state) and target (action for current state)
[schedule_agentsState, schedule_agentsActions] = agents_schedule
save_PairredData = {}
# print(ID_case)
# compute AdjacencyMatrix
GSO, communicationRadius = self.computeAdjacencyMatrix(schedule_agentsState, self.communicationRadius)
(id_sol_map, id_sol_case, _) = ID_case
# transform into relative Coordinate, loop "makespan" times
self.AgentState.setmap(map_data)
input_seq_tensor = self.AgentState.toSeqInputTensor(agents_goal, schedule_agentsState, makespan)
list_input = input_seq_tensor.cpu().detach().numpy()
save_PairredData.update({'map': map_data, 'goal': agents_goal, 'inputState': schedule_agentsState,
'inputTensor': list_input, 'target': schedule_agentsActions,
'GSO': GSO,'makespan':makespan, 'HashIDs':ID_case[2], 'ID_Map':int(id_sol_map), 'ID_case':int(id_sol_case)})
# print(save_PairredData)
self.save(mode, save_PairredData, ID_case, makespan)
print("Save as {}set_#{} from MAP ID_{}.".format(mode, ID_case[1], ID_case[0]))
def pathtransformer_test(self, map_data, agents_schedule, agents_goal, makespan, ID_case, mode):
# input: start and goal position,
# output: a set of file,
# each file consist of state (map. goal, state) and target (action for current state)
(id_sol_map, id_sol_case, _) = ID_case
[schedule_agentsState, schedule_agentsActions] = agents_schedule
save_PairredData = {}
save_PairredData.update({'map': map_data, 'goal': agents_goal,
'inputState': schedule_agentsState[0],
'target': schedule_agentsActions,
'makespan': makespan, 'HashIDs':ID_case[2], 'ID_Map':int(id_sol_map), 'ID_case':int(id_sol_case)})
# print(save_PairredData)
self.save(mode, save_PairredData, ID_case, makespan)
print("Save as {}set_#{} from MAP ID_{}.".format(mode, ID_case[1], ID_case[0]))
def save(self, mode, save_PairredData, ID_case, makespan):
(id_sol_map, id_sol_case,_) = ID_case
file_name = os.path.join(self.path_save_solDATA, mode,'{}_IDMap{}_IDCase{}_MP{}.mat'.format(mode, id_sol_map, id_sol_case, makespan))
# print(file_name)
sio.savemat(file_name, save_PairredData)
def record_pathdata(self, mode, ID_case, makespan):
(id_sol_map, id_sol_case) = ID_case
data_name_mat = '{}_IDMap{}_IDCase{}_MP{}.mat'.format(mode, id_sol_map, id_sol_case, makespan)
if mode == "train":
self.list_seqtrain_file.append([data_name_mat, makespan, 0])
# print("\n train --", self.list_seqtrain_file)
for step in range(makespan):
self.list_train_file.append([data_name_mat, step, 0])
elif mode =='validStep':
self.list_seqvalid_file.append([data_name_mat, makespan, 0])
for step in range(makespan):
self.list_validStep_file.append([data_name_mat, step, 0])
elif mode == "valid":
self.list_valid_file.append([data_name_mat, makespan, 0]) # 0
elif mode == "test":
self.list_test_file.append([data_name_mat, makespan, 0]) # 0
def save_filepath(self):
dirName = self.path_save_solDATA
file_seqtrain_name = os.path.join(dirName,'{}seq_filename.csv'.format('train'))
with open(file_seqtrain_name, "w", newline="") as f:
writer = csv.writer(f)
print("\n train hello --", self.list_seqtrain_file)
writer.writerows(self.list_seqtrain_file)
file_train_name = os.path.join(dirName,'{}_filename.csv'.format('train'))
with open(file_train_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_train_file)
file_seqvalid_name = os.path.join(dirName,'{}seq_filename.csv'.format('valid'))
with open(file_seqvalid_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_seqvalid_file)
file_validStep_name = os.path.join(dirName,'{}_filename.csv'.format('validStep'))
with open(file_validStep_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_validStep_file)
file_valid_name = os.path.join(dirName,'{}_filename.csv'.format('valid'))
with open(file_valid_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_valid_file)
file_test_name = os.path.join(dirName,'{}_filename.csv'.format('test'))
with open(file_test_name, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(self.list_test_file)
def search_failureCases(self, dir):
# make a list of file name of input yaml
list_path = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if self.is_target_file(fname):
path = os.path.join(root, fname)
list_path.append(path)
return list_path
def is_target_file(self, filename):
DATA_EXTENSIONS = ['.yaml']
return any(filename.endswith(extension) for extension in DATA_EXTENSIONS)
def computeAdjacencyMatrix(self, pos, CommunicationRadius, connected=True):
# First, transpose the axis of pos so that the rest of the code follows
# through as legible as possible (i.e. convert the last two dimensions
# from 2 x nNodes to nNodes x 2)
# pos: TimeSteps x nAgents x 2 (X, Y)
# Get the appropriate dimensions
        len_TimeSteps = pos.shape[0]  # length of timesteps
        nNodes = pos.shape[1]  # Number of nodes
# Create the space to hold the adjacency matrices
W = np.zeros([len_TimeSteps, nNodes, nNodes])
threshold = CommunicationRadius # We compute a different
# threshold for each sample, because otherwise one bad trajectory
# will ruin all the adjacency matrices
for t in range(len_TimeSteps):
# Compute the distances
distances = squareform(pdist(pos[t])) # nNodes x nNodes
# Threshold them
W[t] = (distances < threshold).astype(pos.dtype)
# And get rid of the self-loops
W[t] = W[t] - np.diag(np.diag(W[t]))
# Now, check if it is connected, if not, let's make the
# threshold bigger
while (not graph.isConnected(W[t])) and (connected):
# Increase threshold
threshold = threshold * 1.1 # Increase 10%
# Compute adjacency matrix
W[t] = (distances < threshold).astype(pos.dtype)
W[t] = W[t] - np.diag(np.diag(W[t]))
# And since the threshold has probably changed, and we want the same
# threshold for all nodes, we repeat:
W = np.zeros([len_TimeSteps, nNodes, nNodes])
for t in range(len_TimeSteps):
distances = squareform(pdist(pos[t]))
W[t] = (distances < threshold).astype(pos.dtype)
W[t] = W[t] - np.diag(np.diag(W[t]))
# And, when we compute the adjacency matrix, we normalize it by
# the degree
deg = np.sum(W[t], axis=1) # nNodes (degree vector)
# Build the degree matrix powered to the -1/2
Deg = np.diag(np.sqrt(1. / deg))
# And finally get the correct adjacency
W[t] = Deg @ W[t] @ Deg
#Same as * __matmul__
return W, threshold
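    # Normalization note: the final loop applies the symmetric normalization
    # W_norm = D^{-1/2} W D^{-1/2}. E.g. two mutually connected nodes give
    # W = [[0, 1], [1, 0]], deg = [1, 1], and W_norm == W. Caveat (assumption):
    # an isolated node has deg == 0, so np.sqrt(1. / deg) divides by zero;
    # connected=True is what normally prevents that case here.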
# def invertTensorEW(sel, x):
#
# # Elementwise inversion of a tensor where the 0 elements are kept as zero.
# # Warning: Creates a copy of the tensor
# xInv = x.copy() # Copy the matrix to invert
# # Replace zeros for ones.
# xInv[x < zeroTolerance] = 1. # Replace zeros for ones
# xInv = 1. / xInv # Now we can invert safely
# xInv[x < zeroTolerance] = 0. # Put back the zeros
#
# return xInv
if __name__ == '__main__':
    transformer = DataTransformer(args)
    transformer.reset()
    transformer.solutionTransformer()
|
translator.py
|
from collections import deque
import threading
def execute_block(session, args):
    try:
        env = session.env
    except AttributeError:
        # lazily create the worker environment on first use
        session.env = Environment(session)
        env = session.env
    env.deque.append(args)
class Environment(object):
def __init__(self, session):
self.t = threading.Thread(target=self.main)
self.deque = deque()
self.is_imported = False
self.t.start()
self.session = session
def main(self):
while True:
            try:
                operation = self.deque.popleft()
            except IndexError:
                # nothing queued yet; keep polling
                continue
if not self.is_imported:
from API.Rice.application import Application
self.is_imported = True
args_dict = operation[1]
for i in args_dict:
                if i in ('return_values', 'index', 'receiver'):
continue
exec(compile("{}={}".format(i, args_dict[i]), "<string>", 'exec'))
exec(compile(operation[0], "<string>", 'exec'))
return_list = list()
if not operation[1]['return_values'] == "":
for i in operation[1]['return_values'].split(','):
return_list.append(eval(i))
self.session.__getattribute__('socket_{}'.format(args_dict['receiver'])).send({"response_blocks": [operation[1]['index']]+return_list})
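# Usage sketch (hypothetical session object and payload): execute_block queues a
# (code, args) pair; the worker thread exec()s the code with the args bound as
# names and sends the named return values back over the receiver's socket:
#   execute_block(session, ("result = a + b",
#                           {'a': '1', 'b': '2', 'return_values': 'result',
#                            'index': 0, 'receiver': 'main'}))
# Note: the exec/eval here runs arbitrary code from the payload, so callers
# must trust the source of these blocks.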
|
subprocess42.py
|
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""subprocess42 is the answer to life the universe and everything.
It has the particularity of having a Popen implementation that can yield output
as it is produced while implementing a timeout and NOT requiring the use of
worker threads.
Example:
Wait for a child process with a timeout, send SIGTERM, wait a grace period
then send SIGKILL:
def wait_terminate_then_kill(proc, timeout, grace):
try:
return proc.wait(timeout)
except subprocess42.TimeoutExpired:
proc.terminate()
try:
return proc.wait(grace)
except subprocess42.TimeoutExpired:
proc.kill()
return proc.wait()
TODO(maruel): Add VOID support like subprocess2.
"""
import collections
import contextlib
import errno
import os
import signal
import sys
import threading
import time
import subprocess
from subprocess import CalledProcessError, PIPE, STDOUT # pylint: disable=W0611
from subprocess import list2cmdline
import six
# Default maxsize argument.
MAX_SIZE = 16384
# Set to True when inhibit_crash_dump() has been called.
_OS_ERROR_REPORTING_INHIBITED = False
if sys.platform == 'win32':
import ctypes
import msvcrt # pylint: disable=F0401
from ctypes import wintypes
from ctypes import windll
  # Signals that mean this process should exit quickly. Which one is received
  # depends on how this process was called and is outside the control of this
  # script. See the Popen docstring for more details.
STOP_SIGNALS = (signal.SIGBREAK, signal.SIGTERM)
# Windows processes constants.
# Subset of process priority classes.
# https://docs.microsoft.com/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass
BELOW_NORMAL_PRIORITY_CLASS = 0x4000
IDLE_PRIORITY_CLASS = 0x40
# Constants passed to CreateProcess creationflags argument.
# https://docs.microsoft.com/windows/desktop/api/processthreadsapi/nf-processthreadsapi-createprocessw
CREATE_SUSPENDED = 0x4
CREATE_NEW_CONSOLE = subprocess.CREATE_NEW_CONSOLE
CREATE_NEW_PROCESS_GROUP = subprocess.CREATE_NEW_PROCESS_GROUP
# Job Objects constants and structs.
JobObjectBasicLimitInformation = 2
JobObjectBasicUIRestrictions = 4
JobObjectExtendedLimitInformation = 9
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-_jobobject_basic_limit_information
JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x8
JOB_OBJECT_LIMIT_AFFINITY = 0x10
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x800
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x400
JOB_OBJECT_LIMIT_JOB_MEMORY = 0x200
JOB_OBJECT_LIMIT_JOB_TIME = 0x4
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x2000
JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x40
JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x20
JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x100
JOB_OBJECT_LIMIT_PROCESS_TIME = 0x2
JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x80
JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x1000
JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x4000
JOB_OBJECT_LIMIT_WORKINGSET = 0x1
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
('PerProcessUserTimeLimit', ctypes.wintypes.LARGE_INTEGER),
('PerJobUserTimeLimit', ctypes.wintypes.LARGE_INTEGER),
('LimitFlags', ctypes.wintypes.DWORD),
('MinimumWorkingSetSize', ctypes.c_size_t),
('MaximumWorkingSetSize', ctypes.c_size_t),
('ActiveProcessLimit', ctypes.wintypes.DWORD),
('Affinity', ctypes.POINTER(ctypes.wintypes.ULONG)),
('PriorityClass', ctypes.wintypes.DWORD),
('SchedulingClass', ctypes.wintypes.DWORD),
]
@property
def info_type(self):
return JobObjectBasicLimitInformation
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-io_counters
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
('ReadOperationCount', ctypes.c_ulonglong),
('WriteOperationCount', ctypes.c_ulonglong),
('OtherOperationCount', ctypes.c_ulonglong),
('ReadTransferCount', ctypes.c_ulonglong),
('WriteTransferCount', ctypes.c_ulonglong),
('OtherTransferCount', ctypes.c_ulonglong),
]
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-_jobobject_extended_limit_information
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
('BasicLimitInformation', JOBOBJECT_BASIC_LIMIT_INFORMATION),
('IoInfo', IO_COUNTERS),
('ProcessMemoryLimit', ctypes.c_size_t),
('JobMemoryLimit', ctypes.c_size_t),
('PeakProcessMemoryUsed', ctypes.c_size_t),
('PeakJobMemoryUsed', ctypes.c_size_t),
]
@property
def info_type(self):
return JobObjectExtendedLimitInformation
# https://docs.microsoft.com/en-us/windows/desktop/api/winnt/ns-winnt-jobobject_basic_ui_restrictions
JOB_OBJECT_UILIMIT_DESKTOP = 0x40
JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x10
JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x80
JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x20
JOB_OBJECT_UILIMIT_HANDLES = 0x1
class JOBOBJECT_BASIC_UI_RESTRICTIONS(ctypes.Structure):
_fields_ = [('UIRestrictionsClass', ctypes.wintypes.DWORD)]
@property
def info_type(self):
return JobObjectBasicUIRestrictions
def ReadFile(handle, desired_bytes):
"""Calls kernel32.ReadFile()."""
c_read = wintypes.DWORD()
  buff = ctypes.create_string_buffer(desired_bytes + 1)
# If it fails, the buffer will probably(?) not be affected.
  windll.kernel32.ReadFile(handle, buff, desired_bytes,
                           ctypes.byref(c_read), None)
# NULL terminate it.
buff[c_read.value] = '\x00'
  return ctypes.GetLastError(), buff.value
def PeekNamedPipe(handle):
"""Calls kernel32.PeekNamedPipe(). Simplified version."""
c_avail = wintypes.DWORD()
c_message = wintypes.DWORD()
  success = windll.kernel32.PeekNamedPipe(handle, None, 0, None,
                                          ctypes.byref(c_avail),
                                          ctypes.byref(c_message))
  if not success:
    raise OSError(ctypes.GetLastError())
return c_avail.value
def recv_multi_impl(conns, maxsize, timeout):
"""Reads from the first available pipe.
It will immediately return on a closed connection, independent of timeout.
Arguments:
- maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
- timeout: If None, it is blocking. If 0 or above, will return None if no
data is available within |timeout| seconds.
Returns:
tuple(int(index), str(data), bool(closed)).
"""
assert conns
assert timeout is None or isinstance(timeout, (int, float)), timeout
maxsize = max(maxsize or MAX_SIZE, 1)
# TODO(maruel): Use WaitForMultipleObjects(). Python creates anonymous pipes
# for proc.stdout and proc.stderr but they are implemented as named pipes on
# Windows. Since named pipes are not waitable object, they can't be passed
# as-is to WFMO(). So this means N times CreateEvent(), N times ReadFile()
# and finally WFMO(). This requires caching the events handles in the Popen
# object and remembering the pending ReadFile() calls. This will require
# some re-architecture to store the relevant event handle and OVERLAPPEDIO
# object in Popen or the file object.
start = time.time()
handles = [
(i, msvcrt.get_osfhandle(c.fileno())) for i, c in enumerate(conns)
]
while True:
for index, handle in handles:
try:
avail = min(PeekNamedPipe(handle), maxsize)
if avail:
return index, ReadFile(handle, avail)[1], False
except OSError:
# The pipe closed.
return index, None, True
if timeout is not None and (time.time() - start) >= timeout:
return None, None, False
# Polling rocks.
time.sleep(0.001)
class _JobObject(object):
"""Manages a job object."""
def __init__(self, containment):
# The first process to be added to the job object.
self._proc = None
# https://docs.microsoft.com/windows/desktop/api/jobapi2/nf-jobapi2-createjobobjectw
self._hjob = ctypes.windll.kernel32.CreateJobObjectW(None, None)
if not self._hjob:
# pylint: disable=undefined-variable
raise WindowsError('Failed to create job object: %s' %
ctypes.GetLastError())
# TODO(maruel): Use a completion port to listen to messages as described
# at
# https://docs.microsoft.com/windows/desktop/api/winnt/ns-winnt-_jobobject_associate_completion_port
# TODO(maruel): Enable configuring the limit, like maximum number of
# processes, working set size.
obj = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
obj.BasicLimitInformation.LimitFlags = (
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
| JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
if containment.limit_processes:
obj.BasicLimitInformation.ActiveProcessLimit = (
containment.limit_processes)
obj.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_ACTIVE_PROCESS
if containment.limit_total_committed_memory:
obj.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY
obj.JobMemoryLimit = containment.limit_total_committed_memory
self._set_information(obj)
# Add UI limitations.
# TODO(maruel): The limitations currently used are based on Chromium's
# testing needs. For example many unit tests use the clipboard, or change
# the display settings (!)
obj = JOBOBJECT_BASIC_UI_RESTRICTIONS(
UIRestrictionsClass=JOB_OBJECT_UILIMIT_DESKTOP
| JOB_OBJECT_UILIMIT_EXITWINDOWS | JOB_OBJECT_UILIMIT_GLOBALATOMS
| JOB_OBJECT_UILIMIT_HANDLES)
self._set_information(obj)
def close(self):
if self._hjob:
ctypes.windll.kernel32.CloseHandle(self._hjob)
self._hjob = None
def kill(self, exit_code):
"""Return True if the TerminateJobObject call succeeded, or not operation
was done.
"""
if not self._hjob:
return True
# "Kill" the job object instead of the process.
# https://docs.microsoft.com/windows/desktop/api/jobapi2/nf-jobapi2-terminatejobobject
return bool(
ctypes.windll.kernel32.TerminateJobObject(self._hjob, exit_code))
def assign_proc(self, proc):
"""Assigns the process handle to the job object."""
if not ctypes.windll.kernel32.AssignProcessToJobObject(
self._hjob, int(proc._handle)):
# pylint: disable=undefined-variable
raise WindowsError('Failed to assign job object: %s' %
ctypes.GetLastError())
if not ctypes.windll.kernel32.ResumeThread(int(proc._handle_thread)):
# pylint: disable=undefined-variable
raise WindowsError('Failed to resume child process thread: %s' %
ctypes.GetLastError())
def _set_information(self, obj):
# https://docs.microsoft.com/windows/desktop/api/jobapi2/nf-jobapi2-setinformationjobobject
if not ctypes.windll.kernel32.SetInformationJobObject(
self._hjob, obj.info_type, ctypes.byref(obj), ctypes.sizeof(obj)):
# pylint: disable=undefined-variable
raise WindowsError('Failed to adjust job object with type %s: %s' %
(obj.info_type, ctypes.GetLastError()))
else:
import fcntl # pylint: disable=F0401
import select
# Signals that mean this process should exit quickly.
STOP_SIGNALS = (signal.SIGINT, signal.SIGTERM)
def recv_multi_impl(conns, maxsize, timeout):
"""Reads from the first available pipe.
It will immediately return on a closed connection, independent of timeout.
Arguments:
- maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
- timeout: If None, it is blocking. If 0 or above, will return None if no
data is available within |timeout| seconds.
Returns:
tuple(int(index), str(data), bool(closed)).
"""
assert conns
assert timeout is None or isinstance(timeout, (int, float)), timeout
maxsize = max(maxsize or MAX_SIZE, 1)
# select(timeout=0) will block, it has to be a value > 0.
if timeout == 0:
timeout = 0.001
try:
r, _, _ = select.select(conns, [], [], timeout)
except select.error:
r = None
if not r:
return None, None, False
conn = r[0]
# Temporarily make it non-blocking.
# TODO(maruel): This is not very efficient when the caller is doing this in
# a loop. Add a mechanism to have the caller handle this.
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
if not conn.closed:
# pylint: disable=E1101
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
try:
data = conn.read(maxsize)
except IOError as e:
# On posix, this means the read would block.
if e.errno == errno.EAGAIN:
return conns.index(conn), None, False
raise e
if not data:
# On posix, this means the channel closed.
return conns.index(conn), None, True
return conns.index(conn), data, False
finally:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
if six.PY3:
TimeoutExpired = subprocess.TimeoutExpired
else:
class TimeoutExpired(Exception):
"""Compatible with python3 subprocess."""
def __init__(self, cmd, timeout, output=None, stderr=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
# Non-standard:
self.stderr = stderr
super(TimeoutExpired, self).__init__(str(self))
def __str__(self):
return "Command '%s' timed out after %s seconds" % (self.cmd,
self.timeout)
class Containment(object):
"""Defines the containment used to run the process.
On Windows, this is done via a Job Object.
https://docs.microsoft.com/en-us/windows/desktop/procthread/job-objects
"""
# AUTO will use containment if possible, but will not fail if not adequate on
# this operating system.
#
  # For example, job objects cannot be nested on Windows 7 / Windows Server
  # 2008 and earlier, thus AUTO means NONE on these platforms. Windows 8 /
  # Windows Server 2012 and later support nested job objects, thus AUTO means
  # ENABLED on these platforms.
# See https://docs.microsoft.com/en-us/windows/desktop/procthread/job-objects
# cgroups will be added.
NONE, AUTO, JOB_OBJECT = range(3)
NAMES = {
NONE: 'NONE',
AUTO: 'AUTO',
JOB_OBJECT: 'JOB_OBJECT',
}
def __init__(self,
containment_type=NONE,
limit_processes=0,
limit_total_committed_memory=0):
self.containment_type = containment_type
# Limit on the number of active processes.
self.limit_processes = limit_processes
self.limit_total_committed_memory = limit_total_committed_memory
def __eq__(self, rhs):
if not rhs:
return False
return (
self.containment_type == rhs.containment_type and
self.limit_processes == rhs.limit_processes and
self.limit_total_committed_memory == rhs.limit_total_committed_memory)
def __str__(self):
return 'Containment<%s, %s, %s>' % (self.NAMES[self.containment_type],
self.limit_processes,
self.limit_total_committed_memory)
def __repr__(self):
return self.__str__()
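# Example (sketch; Windows-only, values illustrative): contain a child in a
# Job Object limited to 4 active processes and 512 MiB of committed memory:
#   proc = Popen(cmd, containment=Containment(
#       containment_type=Containment.JOB_OBJECT,
#       limit_processes=4,
#       limit_total_committed_memory=512 * 1024 * 1024))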
class Popen(subprocess.Popen):
"""Adds timeout support on stdout and stderr.
Inspired by
http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
Unlike subprocess, yield_any(), recv_*(), communicate() will close stdout and
stderr once the child process closes them, after all the data is read.
Mutated behavior:
- args: transparently encode('utf-8') any unicode items.
- cwd: transparently encode('utf-8') if unicode.
- env: transparently encode('utf-8') any unicode keys or values.
Additional arguments:
- detached: If True, the process is created in a new process group. On
Windows, use CREATE_NEW_PROCESS_GROUP. On posix, use os.setpgid(0, 0).
- lower_priority: reduce the process priority a bit.
- containment: Containment instance or None. When using containment, one of
communicate(), poll(), wait(), yield_any(), yield_any_line() must be
used otherwise a kernel handle may leak.
Additional members:
- start: timestamp when this process started.
- end: timestamp when this process exited, as seen by this process.
- detached: If True, the child process was started as a detached process.
- pgid: process group id, if any.
- duration: time in seconds the process lasted.
Additional methods:
- yield_any(): yields output until the process terminates.
- recv_any(): reads from stdout and/or stderr with optional timeout.
- recv_out() & recv_err(): specialized version of recv_any().
"""
# subprocess.Popen.__init__() is not threadsafe; there is a race between
# creating the exec-error pipe for the child and setting it to CLOEXEC during
# which another thread can fork and cause the pipe to be inherited by its
# descendents, which will cause the current Popen to hang until all those
# descendents exit. Protect this with a lock so that only one fork/exec can
# happen at a time.
popen_lock = threading.Lock()
def __init__(self, args, **kwargs):
    # Windows version of subprocess.Popen() really doesn't like unicode. In
# practice we should use the current ANSI code page, but settle for utf-8
# across all OSes for consistency.
to_str = lambda i: i if isinstance(i, str) else six.ensure_str(i)
args = [to_str(i) for i in args]
if kwargs.get('cwd') is not None:
kwargs['cwd'] = to_str(kwargs['cwd'])
if kwargs.get('env'):
kwargs['env'] = {to_str(k): to_str(v) for k, v in kwargs['env'].items()}
# Set via contrived monkey patching below, because stdlib doesn't expose
# thread handle. Only set on Windows.
self._handle_thread = None
# Will be set by super constructor but may be accessed in failure modes by
# _cleanup().
self._handle = None
self._job = None
self.detached = kwargs.pop('detached', False)
if self.detached:
if sys.platform == 'win32':
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | CREATE_NEW_PROCESS_GROUP
else:
old_preexec_fn_1 = kwargs.get('preexec_fn')
def new_preexec_fn_1():
if old_preexec_fn_1:
old_preexec_fn_1()
os.setpgid(0, 0)
kwargs['preexec_fn'] = new_preexec_fn_1
if kwargs.pop('lower_priority', False):
if sys.platform == 'win32':
# TODO(maruel): If already in this class, it should use
# IDLE_PRIORITY_CLASS.
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | BELOW_NORMAL_PRIORITY_CLASS
else:
old_preexec_fn_2 = kwargs.get('preexec_fn')
def new_preexec_fn_2():
if old_preexec_fn_2:
old_preexec_fn_2()
os.nice(1)
kwargs['preexec_fn'] = new_preexec_fn_2
self.containment = kwargs.pop('containment', None) or Containment()
if self.containment.containment_type != Containment.NONE:
if self.containment.containment_type == Containment.JOB_OBJECT:
if sys.platform != 'win32':
raise NotImplementedError(
'containment is not implemented on this platform')
if sys.platform == 'win32':
# May throw an WindowsError.
# pylint: disable=undefined-variable
self._job = _JobObject(self.containment)
# In this case, start the process suspended, so we can assign the job
# object, then resume it.
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | CREATE_SUSPENDED
self.end = None
self.pgid = None
self.start = time.time()
try:
with self.popen_lock:
if sys.platform == 'win32':
# We need the thread handle, save it.
old = subprocess._subprocess.CreateProcess
class FakeHandle(object):
def Close(self):
pass
def patch_CreateProcess(*args, **kwargs):
hp, ht, pid, tid = old(*args, **kwargs)
# Save the thread handle, and return a fake one that
            # _execute_child() will close indiscriminately.
self._handle_thread = ht
return hp, FakeHandle(), pid, tid
subprocess._subprocess.CreateProcess = patch_CreateProcess
try:
super(Popen, self).__init__(args, **kwargs)
finally:
if sys.platform == 'win32':
subprocess._subprocess.CreateProcess = old
except:
self._cleanup()
raise
self.args = args
if self.detached and sys.platform != 'win32':
try:
self.pgid = os.getpgid(self.pid)
except OSError:
# sometimes the process can run+finish before we collect its pgid. fun.
pass
if self._job:
try:
self._job.assign_proc(self)
except OSError:
self.kill()
self.wait()
def duration(self):
"""Duration of the child process.
It is greater or equal to the actual time the child process ran. It can be
significantly higher than the real value if neither .wait() nor .poll() was
used.
"""
return (self.end or time.time()) - self.start
# pylint: disable=arguments-differ,redefined-builtin
def communicate(self, input=None, timeout=None):
"""Implements python3's timeout support.
Unlike wait(), timeout=0 is considered the same as None.
Returns:
tuple of (stdout, stderr).
Raises:
- TimeoutExpired when more than timeout seconds were spent waiting for the
process.
"""
if not timeout:
return super(Popen, self).communicate(input=input)
if six.PY3:
return super(Popen, self).communicate( # pylint: disable=unexpected-keyword-arg
input=input,
timeout=timeout,
)
assert isinstance(timeout, (int, float)), timeout
if self.stdin or self.stdout or self.stderr:
stdout = b'' if self.stdout else None
stderr = b'' if self.stderr else None
t = None
if input is not None:
        assert self.stdin, ('Can\'t use communicate(input) if not using '
                            'Popen(stdin=subprocess42.PIPE)')
# TODO(maruel): Switch back to non-threading.
def write():
try:
self.stdin.write(input)
except IOError:
pass
t = threading.Thread(name='Popen.communicate', target=write)
t.daemon = True
t.start()
try:
if self.stdout or self.stderr:
start = time.time()
end = start + timeout
def remaining():
return max(end - time.time(), 0)
for pipe, data in self.yield_any(timeout=remaining):
if pipe is None:
raise TimeoutExpired(self.args, timeout, stdout, stderr)
assert pipe in ('stdout', 'stderr'), pipe
if pipe == 'stdout':
stdout += data
else:
stderr += data
else:
# Only stdin is piped.
self.wait(timeout=timeout)
finally:
if t:
try:
self.stdin.close()
except IOError:
pass
t.join()
else:
# No pipe. The user wanted to use wait().
self.wait(timeout=timeout)
return None, None
# Indirectly initialize self.end.
self.wait()
return stdout, stderr
def wait(self,
timeout=None,
poll_initial_interval=0.001,
poll_max_interval=0.05): # pylint: disable=arguments-differ
"""Implements python3's timeout support.
Raises:
- TimeoutExpired when more than timeout seconds were spent waiting for the
process.
"""
assert timeout is None or isinstance(timeout, (int, float)), timeout
if timeout is None:
super(Popen, self).wait()
elif six.PY3:
super(Popen, self).wait(timeout)
elif self.returncode is None:
if sys.platform == 'win32':
WAIT_TIMEOUT = 258
result = subprocess._subprocess.WaitForSingleObject(
self._handle, int(timeout * 1000))
if result == WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = subprocess._subprocess.GetExitCodeProcess(
self._handle)
else:
# If you think the following code is horrible, it's because it is
# inspired by python3's stdlib.
end = time.time() + timeout
delay = poll_initial_interval
while True:
try:
pid, sts = subprocess._eintr_retry_call(os.waitpid, self.pid,
os.WNOHANG)
except OSError as e:
if e.errno != errno.ECHILD:
raise
pid = self.pid
sts = 0
if pid == self.pid:
# This sets self.returncode.
self._handle_exitstatus(sts)
break
remaining = end - time.time()
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, poll_max_interval)
time.sleep(delay)
if not self.end:
# communicate() uses wait() internally.
self.end = time.time()
self._cleanup()
return self.returncode
def poll(self):
ret = super(Popen, self).poll()
if ret is not None and not self.end:
self.end = time.time()
# This may kill all children processes.
self._cleanup()
return ret
def yield_any_line(self, **kwargs):
"""Yields lines until the process terminates.
Like yield_any, but yields lines.
"""
return split(self.yield_any(**kwargs))
def yield_any(self, maxsize=None, timeout=None):
"""Yields output until the process terminates.
Unlike wait(), does not raise TimeoutExpired.
Yields:
(pipename, data) where pipename is either 'stdout', 'stderr' or None in
case of timeout or when the child process closed one of the pipe(s) and
all pending data on the pipe was read.
Arguments:
- maxsize: See recv_any(). Can be a callable function.
- timeout: If None, the call is blocking. If set, yields None, None if no
data is available within |timeout| seconds. It resets itself after
each yield. Can be a callable function.
"""
assert self.stdout or self.stderr
if timeout is not None:
# timeout=0 effectively means that the pipe is continuously polled.
if isinstance(timeout, (int, float)):
assert timeout >= 0, timeout
old_timeout = timeout
timeout = lambda: old_timeout
else:
assert callable(timeout), timeout
if maxsize is not None and not callable(maxsize):
assert isinstance(maxsize, (int, float)), maxsize
last_yield = time.time()
while self.poll() is None:
to = timeout() if timeout else None
if to is not None:
to = max(to - (time.time() - last_yield), 0)
t, data = self.recv_any(
maxsize=maxsize() if callable(maxsize) else maxsize, timeout=to)
if data or to == 0:
yield t, data
last_yield = time.time()
# Read all remaining output in the pipes.
    # There are 3 cases:
    # - pipes get closed automatically by the calling process before it exits
    # - pipes are closed automatically by the OS
    # - pipes are kept open due to grand-children processes outliving the
    #   child process.
while True:
ms = maxsize
if callable(maxsize):
ms = maxsize()
# timeout=0 is mainly to handle the case where a grand-children process
# outlives the process started.
t, data = self.recv_any(maxsize=ms, timeout=0)
if not data:
break
yield t, data
def recv_any(self, maxsize=None, timeout=None):
"""Reads from the first pipe available from stdout and stderr.
Unlike wait(), does not throw TimeoutExpired.
Arguments:
- maxsize: Maximum number of bytes to return. Defaults to MAX_SIZE.
- timeout: If None, it is blocking. If 0 or above, will return None if no
data is available within |timeout| seconds.
Returns:
tuple(pipename or None, str(data)). pipename is one of 'stdout' or
'stderr'.
"""
# recv_multi_impl will early exit on a closed connection. Loop accordingly
# to simplify call sites.
while True:
pipes = [
x for x in ((self.stderr, 'stderr'), (self.stdout, 'stdout')) if x[0]
]
# If both stdout and stderr have the exact file handle, they are
# effectively the same pipe. Deduplicate it since otherwise it confuses
# recv_multi_impl().
if len(pipes) == 2 and self.stderr.fileno() == self.stdout.fileno():
pipes.pop(0)
if not pipes:
return None, None
start = time.time()
conns, names = zip(*pipes)
index, data, closed = recv_multi_impl(conns, maxsize, timeout)
if index is None:
return index, data
if closed:
self._close(names[index])
if not data:
# Loop again. The other pipe may still be open.
if timeout:
timeout -= (time.time() - start)
continue
if self.universal_newlines and data:
if six.PY3:
data = self._translate_newlines(
six.ensure_binary(data), encoding='utf-8', errors='strict')
else:
data = self._translate_newlines(data)
return names[index], data
def recv_out(self, maxsize=None, timeout=None):
"""Reads from stdout synchronously with timeout."""
return self._recv('stdout', maxsize, timeout)
def recv_err(self, maxsize=None, timeout=None):
"""Reads from stderr synchronously with timeout."""
return self._recv('stderr', maxsize, timeout)
def terminate(self):
"""Tries to do something saner on Windows that the stdlib.
Windows:
self.detached/CREATE_NEW_PROCESS_GROUP determines what can be used:
- If set, only SIGBREAK can be sent and it is sent to a single process.
- If not set, in theory only SIGINT can be used and *all processes* in
the processgroup receive it. In practice, we just kill the process.
See http://msdn.microsoft.com/library/windows/desktop/ms683155.aspx
The default on Windows is to call TerminateProcess() always, which is not
useful.
On Posix, always send SIGTERM.
"""
try:
if sys.platform == 'win32' and self.detached:
return self.send_signal(signal.CTRL_BREAK_EVENT)
super(Popen, self).terminate()
except OSError:
# The function will throw if the process terminated in-between. Swallow
# this.
pass
def kill(self):
"""Kills the process and its children if possible.
    Swallows exceptions and returns True on success.
This process may be asynchronous. The user should still call wait() to
ensure the process is indeed terminated.
Note that if this Popen uses job containment (e.g. Job Objects on Windows or
a process group on Linux/macOS), then `kill()` will have an effect even if
`wait()` already returned. Namely, even though the immediate child process
finished, there can still be indirect subprocesses in the Job Object or
process group which need to be killed.
"""
# First check if there's some sort of containment (job object or process
# group)
if self._job:
# Use the equivalent of SIGKILL on linux. signal.SIGKILL is not available
# on Windows.
return self._job.kill(-9)
if self.pgid:
try:
os.killpg(self.pgid, signal.SIGKILL)
return True
except OSError:
return False
# At this point we only have tracking information for the process itself;
# Return True if we have a returncode (i.e. it's already been wait()'d),
# otherwise defer to subprocess.Popen.kill()
if self.returncode is not None:
# If a return code was recorded, it means there's nothing to kill as there
# was no containment.
return True
try:
super(Popen, self).kill()
except OSError:
return False
return True
def _close(self, which):
"""Closes either stdout or stderr."""
getattr(self, which).close()
setattr(self, which, None)
def _cleanup(self):
"""Makes sure resources are not leaked."""
if self._job:
# This may kill all children processes.
self._job.close()
self._job = None
if self._handle_thread:
self._handle_thread.Close()
self._handle_thread = None
if self._handle:
# self._handle is deleted via __del__ but when it happens is
# non-deterministic, so do it earlier.
self._handle.Close()
self._handle = None
def _recv(self, which, maxsize, timeout):
"""Reads from one of stdout or stderr synchronously with timeout."""
conn = getattr(self, which)
if conn is None:
return None
_, data, closed = recv_multi_impl([conn], maxsize, timeout)
if closed:
self._close(which)
if self.universal_newlines and data:
data = self._translate_newlines(data)
return data
@contextlib.contextmanager
def set_signal_handler(signals, handler):
"""Temporarilly override signals handler.
Useful when waiting for a child process to handle signals like SIGTERM, so the
signal can be propagated to the child process.
"""
previous = {s: signal.signal(s, handler) for s in signals}
try:
yield
finally:
for sig, h in previous.items():
signal.signal(sig, h)
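# Example (sketch): forward stop signals to a child while waiting on it, so
# the child can shut down cleanly ('proc' is a subprocess42.Popen):
#   with set_signal_handler(STOP_SIGNALS, lambda sig, _: proc.send_signal(sig)):
#     proc.wait()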
def call(*args, **kwargs):
"""Adds support for timeout."""
timeout = kwargs.pop('timeout', None)
return Popen(*args, **kwargs).wait(timeout)
def check_call(*args, **kwargs):
"""Adds support for timeout."""
retcode = call(*args, **kwargs)
if retcode:
raise CalledProcessError(retcode, kwargs.get('args') or args[0])
return 0
def check_output(*args, **kwargs):
"""Adds support for timeout."""
timeout = kwargs.pop('timeout', None)
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *args, **kwargs)
output, _ = process.communicate(timeout=timeout)
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, kwargs.get('args') or args[0], output)
return output
def call_with_timeout(args, timeout, **kwargs):
"""Runs an executable; kill it in case of timeout."""
proc = Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kwargs)
try:
out, err = proc.communicate(timeout=timeout)
except TimeoutExpired as e:
out = e.output
err = e.stderr
proc.kill()
proc.wait()
return out, err, proc.returncode, proc.duration()
def inhibit_os_error_reporting():
"""Inhibits error reporting UI and core files.
This function should be called as early as possible in the process lifetime.
"""
global _OS_ERROR_REPORTING_INHIBITED
if not _OS_ERROR_REPORTING_INHIBITED:
_OS_ERROR_REPORTING_INHIBITED = True
if sys.platform == 'win32':
# Windows has a bad habit of opening a dialog when a console program
# crashes, rather than just letting it crash. Therefore, when a program
# crashes on Windows, we don't find out until the build step times out.
# This code prevents the dialog from appearing, so that we find out
# immediately and don't waste time waiting for a user to close the dialog.
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
SEM_FAILCRITICALERRORS = 1
SEM_NOGPFAULTERRORBOX = 2
SEM_NOALIGNMENTFAULTEXCEPT = 0x8000
ctypes.windll.kernel32.SetErrorMode(SEM_FAILCRITICALERRORS
| SEM_NOGPFAULTERRORBOX
| SEM_NOALIGNMENTFAULTEXCEPT)
# TODO(maruel): Other OSes.
# - OSX, need to figure out a way to make the following process tree local:
# defaults write com.apple.CrashReporter UseUNC 1
# defaults write com.apple.CrashReporter DialogType none
# - Ubuntu, disable apport if needed.
def split(data, sep=b'\n'):
"""Splits pipe data by |sep|. Does some buffering.
For example, [('stdout', b'a\nb'), ('stdout', b'\n'), ('stderr', b'c\n')] ->
[('stdout', b'a'), ('stdout', b'b'), ('stderr', b'c')].
Args:
data: iterable of tuples (pipe_name, bytes).
Returns:
An iterator of tuples (pipe_name, bytes) where bytes is the input data
but split by sep into separate tuples.
"""
# A dict {pipe_name -> list of pending chunks without separators}
pending_chunks = collections.defaultdict(list)
for pipe_name, chunk in data:
if chunk is None:
# Happens if a pipe is closed.
continue
pending = pending_chunks[pipe_name]
start = 0 # offset in chunk to start |sep| search from
while start < len(chunk):
j = chunk.find(sep, start)
if j == -1:
pending_chunks[pipe_name].append(chunk[start:])
break
to_emit = chunk[start:j]
start = j + 1
if pending:
# prepend and forget
to_emit = b''.join(pending) + to_emit
pending = []
pending_chunks[pipe_name] = pending
yield pipe_name, to_emit
# Emit remaining chunks that don't end with separators as is.
for pipe_name, chunks in sorted(pending_chunks.items()):
if chunks:
yield pipe_name, b''.join(chunks)
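if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module): stream a child
  # process's output as it is produced, with a 10 second per-read timeout.
  proc = Popen(
      [sys.executable, '-u', '-c', 'print("hello"); print("world")'],
      stdout=PIPE, stderr=PIPE)
  for pipe_name, data in proc.yield_any(timeout=10):
    if pipe_name is None:
      # No output within the timeout; give up on the child.
      proc.kill()
      break
    print('%s: %r' % (pipe_name, data))
  proc.wait()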
|
event_based_scheduler_job.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sched
import signal
import sys
import threading
import time
import traceback
from typing import Callable, List, Optional
from airflow.contrib.jobs.periodic_manager import PeriodicManager
from airflow.events.context_extractor import ContextExtractor, EventContext
from airflow.exceptions import SerializedDagNotFound, AirflowException
from airflow.models.dagcode import DagCode
from airflow.models.event_progress import get_event_progress, create_or_update_progress
from airflow.models.message import IdentifiedMessage, MessageState
from sqlalchemy import func, not_, or_, asc, case
from sqlalchemy.orm import selectinload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagModel, BaseOperator
from airflow.models.dag import DagEventDependencies, DAG
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.eventhandler import EventKey
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstanceKey
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.utils.mailbox import Mailbox
from airflow.events.scheduler_events import (
StopSchedulerEvent, TaskSchedulingEvent, DagExecutableEvent, TaskStateChangedEvent, EventHandleEvent, RequestEvent,
ResponseEvent, StopDagEvent, ParseDagRequestEvent, ParseDagResponseEvent, SchedulerInnerEventUtil,
BaseUserDefineMessage, UserDefineMessageType, SCHEDULER_NAMESPACE, DagRunFinishedEvent, PeriodicEvent,
DagRunCreatedEvent)
from notification_service.base_notification import BaseEvent
from notification_service.client import EventWatcher, NotificationClient
from airflow.contrib.jobs.dag_trigger import DagTrigger
from airflow.contrib.jobs.dagrun_event_manager import DagRunEventManager, DagRunId
from airflow.executors.scheduling_action import SchedulingAction
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
MSG = models.Message
class EventBasedScheduler(LoggingMixin):
def __init__(self, id,
mailbox: Mailbox,
task_event_manager: DagRunEventManager,
executor: BaseExecutor,
notification_client: NotificationClient,
notification_server_uri: str,
context=None,
periodic_manager: PeriodicManager = None):
super().__init__(context)
self.id = id
self.mailbox = mailbox
self.task_event_manager: DagRunEventManager = task_event_manager
self.executor = executor
self.notification_client = notification_client
self.dagbag = DagBag(read_dags_from_db=True)
self._timer_handler = None
self.timers = sched.scheduler()
self.periodic_manager = periodic_manager
self.notification_server_uri = notification_server_uri
def sync(self):
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
self._timer_handler = self.timers.enter(delay, 1, repeat, args, kwargs)
self._timer_handler = self.timers.enter(delay, 1, repeat, arguments, kwargs)
call_regular_interval(
delay=conf.getfloat('scheduler', 'scheduler_heartbeat_sec', fallback='5.0'),
action=self.executor.sync
)
self.timers.run()
def stop_timer(self):
if self.timers and self._timer_handler:
self.timers.cancel(self._timer_handler)
def submit_sync_thread(self):
threading.Thread(target=self.sync).start()
def schedule(self) -> bool:
identified_message = self.mailbox.get_identified_message()
if not identified_message:
return True
origin_event = identified_message.deserialize()
self.log.debug("Event: {}".format(origin_event))
if SchedulerInnerEventUtil.is_inner_event(origin_event):
event = SchedulerInnerEventUtil.to_inner_event(origin_event)
else:
event = origin_event
with create_session() as session:
if isinstance(event, BaseEvent):
dagruns = self._find_dagruns_by_event(event, session)
for dagrun in dagruns:
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, event)
elif isinstance(event, RequestEvent):
self._process_request_event(event)
elif isinstance(event, TaskSchedulingEvent):
self._schedule_task(event)
elif isinstance(event, TaskStateChangedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
self._handle_task_status_changed(dagrun, event, session)
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, origin_event)
tasks = self._find_downstream_tasks(event.task_id, dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
if dagrun.state in State.finished:
self.mailbox.send_message(DagRunFinishedEvent(dagrun.dag_id, dagrun.execution_date).to_event())
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(event.dag_id,
event.execution_date))
elif isinstance(event, DagRunCreatedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(
event.dag_id, event.execution_date))
elif isinstance(event, DagExecutableEvent):
if DagModel.dag_needing_dagruns(session, event.dag_id):
dagrun = self._create_dag_run(event.dag_id, session=session)
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
elif isinstance(event, EventHandleEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, run_id=event.dag_run_id)
if len(dag_runs) < 1:
self.log.warning("DagRun not found by dag_id:{}, run_id:{}".format(
event.dag_id, event.dag_run_id))
else:
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, event.action)
elif isinstance(event, StopDagEvent):
self._stop_dag(event.dag_id, session)
elif isinstance(event, DagRunFinishedEvent):
self._remove_periodic_events(event.dag_id, event.execution_date)
elif isinstance(event, PeriodicEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, execution_date=event.execution_date)
if len(dag_runs) < 1:
self.log.warning("DagRun not found by dag_id:{}, run_id:{}".format(
event.dag_id, event.dag_run_id))
else:
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, SchedulingAction.RESTART)
elif isinstance(event, StopSchedulerEvent):
self.log.info("{} {}".format(self.id, event.job_id))
if self.id == event.job_id or 0 == event.job_id:
self.log.info("break the scheduler event loop.")
identified_message.remove_handled_message()
session.expunge_all()
return False
elif isinstance(event, ParseDagRequestEvent) or isinstance(event, ParseDagResponseEvent):
pass
elif isinstance(event, ResponseEvent):
pass
else:
self.log.error("can not handler the event {}".format(event))
identified_message.remove_handled_message()
session.expunge_all()
return True
def _handle_task_status_changed(self, dagrun: DagRun, event: TaskStateChangedEvent, session):
ti = dagrun.get_task_instance(task_id=event.task_id)
if event.try_number == ti.try_number:
if State.UP_FOR_RETRY == event.state:
dag = self.dagbag.get_dag(dagrun.dag_id, session=session)
ti.task = dag.get_task(ti.task_id)
next_retry_datetime = ti.next_retry_datetime()
self.mailbox.send_message(message=TaskSchedulingEvent(dag_id=event.dag_id,
task_id=event.task_id,
execution_date=event.execution_date,
try_number=event.try_number,
action=SchedulingAction.START).to_event(),
queue_time=next_retry_datetime)
ti.update_latest_task_execution(session=session)
def stop(self) -> None:
self.mailbox.send_message(StopSchedulerEvent(self.id).to_event())
self.log.info("Send stop event to the scheduler.")
def recover(self, last_scheduling_id):
lost_dag_codes = DagCode.recover_lost_dag_code()
self.log.info("Found %s dags not exists in DAG folder, recovered from DB. Dags' path: %s",
len(lost_dag_codes), lost_dag_codes)
self.log.info("Waiting for executor recovery...")
self.executor.recover_state()
unprocessed_messages = self.get_unprocessed_message(last_scheduling_id)
self.log.info("Recovering %s messages of last scheduler job with id: %s",
len(unprocessed_messages), last_scheduling_id)
for msg in unprocessed_messages:
self.mailbox.send_message(msg.deserialize(), msg.queue_time)
@staticmethod
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
with create_session() as session:
results: List[MSG] = session.query(MSG).filter(
MSG.scheduling_job_id == last_scheduling_id,
MSG.state == MessageState.QUEUED
).order_by(asc(MSG.id)).all()
unprocessed: List[IdentifiedMessage] = []
for msg in results:
unprocessed.append(IdentifiedMessage(msg.data, msg.id, msg.queue_time))
return unprocessed
def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:
dagrun = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).first()
return dagrun
def _register_periodic_events(self, execution_date, dag, session=None):
self.periodic_manager.store.set_session(session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('register periodic task {} {} {}'.format(dag.dag_id, execution_date, task.task_id))
self.periodic_manager.add_task(dag_id=dag.dag_id,
execution_date=execution_date,
task_id=task.task_id,
periodic_config=task.executor_config['periodic_config'])
self.periodic_manager.store.unset_session()
@provide_session
def _remove_periodic_events(self, dag_id, execution_date, session=None):
dagruns = DagRun.find(dag_id=dag_id, execution_date=execution_date)
dag = self.dagbag.get_dag(dag_id=dagruns[0].dag_id, session=session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('remove periodic task {} {} {}'.format(dag_id, execution_date, task.task_id))
self.periodic_manager.remove_task(dag_id, execution_date, task.task_id)
def _create_dag_run(self, dag_id, session, run_type=DagRunType.SCHEDULED, context=None) -> DagRun:
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
try:
dag = self.dagbag.get_dag(dag_id, session=session)
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if dag_model is None:
return None
next_dagrun = dag_model.next_dagrun
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
external_trigger = False
# register periodic task
if run_type == DagRunType.MANUAL:
next_dagrun = timezone.utcnow()
external_trigger = True
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
active_dagrun = session.query(DagRun)\
.filter(DagRun.dag_id == dag_model.dag_id,
DagRun.execution_date == dag_model.next_dagrun).first()
if active_dagrun is not None:
self.log.info("Dagrun already created, %s", active_dagrun)
return active_dagrun
dag_run = dag.create_dagrun(
run_type=run_type,
execution_date=next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=external_trigger,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
context=context
)
if run_type == DagRunType.SCHEDULED:
self._update_dag_next_dagrun(dag_id, session)
self._register_periodic_events(dag_run.execution_date, dag, session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagrun
return dag_run
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_id)
return None
except Exception:
self.log.exception("Error occurred when create dag_run of dag: %s", dag_id)
return None
def _update_dag_next_dagrun(self, dag_id, session):
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
active_runs_of_dag = session \
.query(func.count('*')).filter(
DagRun.dag_id == dag_id,
DagRun.state == State.RUNNING,
DagRun.external_trigger.is_(False),
).scalar()
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
dag = self.dagbag.get_dag(dag_id, session=session)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_task(self, scheduling_event: TaskSchedulingEvent):
task_key = TaskInstanceKey(
scheduling_event.dag_id,
scheduling_event.task_id,
scheduling_event.execution_date,
scheduling_event.try_number
)
self.executor.schedule_task(task_key, scheduling_event.action)
def _find_dagruns_by_event(self, event, session) -> Optional[List[DagRun]]:
affect_dag_runs = []
event_key = EventKey(event.key, event.event_type, event.namespace, event.sender)
dag_runs = session \
.query(DagRun).filter(DagRun.state == State.RUNNING).all()
self.log.debug('dag_runs {}'.format(len(dag_runs)))
if dag_runs is None or len(dag_runs) == 0:
return affect_dag_runs
dags = session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id.in_(dag_run.dag_id for dag_run in dag_runs)
).all()
self.log.debug('dags {}'.format(len(dags)))
affect_dags = {}
for dag in dags:
self.log.debug('dag config {}'.format(dag.event_relationships))
self.log.debug('event key {} {} {}'.format(event.key, event.event_type, event.namespace))
dep: DagEventDependencies = DagEventDependencies.from_json(dag.event_relationships)
if dep.is_affect(event_key):
context_extractor: ContextExtractor = dag.context_extractor
try:
event_context: EventContext = context_extractor.extract_context(event)
except Exception as e:
self.log.error(
"Failed to call context extractor, dag {} skips event {}".format(dag.dag_id, event),
exc_info=e)
continue
if event_context is not None:
affect_dags[dag.dag_id] = event_context
if len(affect_dags) == 0:
return affect_dag_runs
for dag_run in dag_runs:
if dag_run.dag_id in affect_dags:
event_context: EventContext = affect_dags[dag_run.dag_id]
if event_context.is_broadcast() or dag_run.context in event_context.get_contexts():
affect_dag_runs.append(dag_run)
return affect_dag_runs
def _find_scheduled_tasks(
self,
dag_run: DagRun,
session: Session,
check_execution_date=False
) -> Optional[List[TI]]:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:return: scheduled tasks
"""
if not dag_run or dag_run.get_state() in State.finished:
return
try:
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
return None
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return None
currently_active_runs = session.query(
TI.execution_date,
).filter(
TI.dag_id == dag_run.dag_id,
TI.state.notin_(list(State.finished)),
).distinct().all()
if check_execution_date and dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.warning("Execution date is in future: %s", dag_run.execution_date)
return None
if dag.max_active_runs and not dag.is_long_running_dag():
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.warning(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
dag_run.schedule_tis(schedulable_tis, session)
session.commit()
query = (session.query(TI)
.outerjoin(TI.dag_run)
.filter(DR.run_id == dag_run.run_id)
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model')))
scheduled_tis: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
return scheduled_tis
def _find_downstream_tasks(self, task_id, dag_run, session) -> Optional[List[TI]]:
tasks = self._find_scheduled_tasks(dag_run, session)
if not tasks or len(tasks) == 0:
return None
dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
downstream_task_ids = dag.task_dict.get(task_id).downstream_task_ids
res = []
for task in tasks:
if task.task_id in downstream_task_ids:
res.append(task)
return res
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_scheduling_task_event(self, ti: Optional[TI], action: SchedulingAction):
if ti is None or action == SchedulingAction.NONE:
return
with create_session() as session:
ti.state = State.QUEUED
session.commit()
task_scheduling_event = TaskSchedulingEvent(
ti.task_id,
ti.dag_id,
ti.execution_date,
ti.try_number,
action
)
self.mailbox.send_message(task_scheduling_event.to_event())
def _send_scheduling_task_events(self, tis: Optional[List[TI]], action: SchedulingAction):
if tis is None:
return
for ti in tis:
self._send_scheduling_task_event(ti, action)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@staticmethod
def _reset_unfinished_task_state(dag_run):
with create_session() as session:
to_be_reset = [s for s in State.unfinished if s not in [State.RUNNING, State.QUEUED]]
tis = dag_run.get_task_instances(to_be_reset, session)
for ti in tis:
ti.state = State.NONE
session.commit()
@provide_session
def restore_unfinished_dag_run(self, session):
dag_runs = DagRun.next_dagruns_to_examine(session, max_number=sys.maxsize).all()
if not dag_runs or len(dag_runs) == 0:
return
for dag_run in dag_runs:
self._reset_unfinished_task_state(dag_run)
tasks = self._find_scheduled_tasks(dag_run, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _process_request_event(self, event: RequestEvent, session: Session = None):
try:
message = BaseUserDefineMessage()
message.from_json(event.body)
if message.message_type == UserDefineMessageType.RUN_DAG:
# todo make sure dag file is parsed.
dagrun = self._create_dag_run(message.dag_id, session=session, run_type=DagRunType.MANUAL,
context=message.context)
if not dagrun:
self.log.error("Failed to create dag_run.")
                # TODO Need to add ret_code and error_msg in ExecutionContext in case of exception
self.notification_client.send_event(ResponseEvent(event.request_id, None).to_event())
return
tasks = self._find_scheduled_tasks(dagrun, session, False)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
elif message.message_type == UserDefineMessageType.STOP_DAG_RUN:
dag_run = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
self._stop_dag_run(dag_run)
self.notification_client.send_event(ResponseEvent(event.request_id, dag_run.run_id).to_event())
elif message.message_type == UserDefineMessageType.EXECUTE_TASK:
dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
ti: TI = dagrun.get_task_instance(task_id=message.task_id)
self.mailbox.send_message(TaskSchedulingEvent(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=ti.execution_date,
try_number=ti.try_number,
action=SchedulingAction(message.action)
).to_event())
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
except Exception:
self.log.exception("Error occurred when processing request event.")
def _stop_dag(self, dag_id, session: Session):
"""
Stop the dag. Pause the dag and cancel all running dag_runs and task_instances.
"""
DagModel.get_dagmodel(dag_id, session)\
.set_is_paused(is_paused=True, including_subdags=True, session=session)
active_runs = DagRun.find(dag_id=dag_id, state=State.RUNNING)
for dag_run in active_runs:
self._stop_dag_run(dag_run)
def _stop_dag_run(self, dag_run: DagRun):
dag_run.stop_dag_run()
for ti in dag_run.get_task_instances():
if ti.state in State.unfinished:
self.executor.schedule_task(ti.key, SchedulingAction.STOP)
self.mailbox.send_message(DagRunFinishedEvent(dag_id=dag_run.dag_id,
execution_date=dag_run.execution_date).to_event())
class SchedulerEventWatcher(EventWatcher):
def __init__(self, mailbox):
self.mailbox = mailbox
def process(self, events: List[BaseEvent]):
for e in events:
self.mailbox.send_message(e)
class EventBasedSchedulerJob(BaseJob):
"""
1. todo self heartbeat
"""
__mapper_args__ = {'polymorphic_identity': 'EventBasedSchedulerJob'}
def __init__(self, dag_directory,
notification_server_uri=None,
event_start_time=None,
max_runs=-1,
refresh_dag_dir_interval=conf.getint('scheduler', 'refresh_dag_dir_interval', fallback=1),
*args, **kwargs):
super().__init__(*args, **kwargs)
if notification_server_uri is None:
notification_server_uri = conf.get('scheduler', 'notification_server_uri', fallback='127.0.0.1:50052')
self.log.info("Starting event based scheduler with notification server uri: {}".format(notification_server_uri))
self.mailbox: Mailbox = Mailbox()
self.dag_trigger: DagTrigger = DagTrigger(
dag_directory=dag_directory,
max_runs=max_runs,
dag_ids=None,
pickle_dags=False,
mailbox=self.mailbox,
refresh_dag_dir_interval=refresh_dag_dir_interval,
notification_server_uri=notification_server_uri
)
self.task_event_manager = DagRunEventManager(self.mailbox)
self.executor.set_mailbox(self.mailbox)
self.executor.set_notification_server_uri(notification_server_uri)
self.notification_client: NotificationClient = NotificationClient(server_uri=notification_server_uri,
default_namespace=SCHEDULER_NAMESPACE)
self.periodic_manager = PeriodicManager(self.mailbox)
self.scheduler: EventBasedScheduler = EventBasedScheduler(
self.id,
self.mailbox,
self.task_event_manager,
self.executor,
self.notification_client,
notification_server_uri,
None,
self.periodic_manager
)
self.last_scheduling_id = self._last_scheduler_job_id()
self.need_recover_state = False
self.last_event_version = None
if event_start_time is None:
if self.last_scheduling_id is None:
self.start_time = int(time.time() * 1000)
else:
# need recover the state of the scheduler
self.start_time, self.last_event_version = self._get_progress(self.last_scheduling_id)
self.need_recover_state = True
else:
self.start_time = event_start_time
        self.log.info('Scheduler progress: start_time=%s, last_event_version=%s', self.start_time, self.last_event_version)
@staticmethod
def _last_scheduler_job_id():
last_run = EventBasedSchedulerJob.most_recent_job()
if not last_run:
return None
else:
return last_run.id
@staticmethod
def _get_progress(scheduling_job_id):
progress = get_event_progress(scheduling_job_id)
if progress is None:
return int(time.time() * 1000), None
else:
return progress.last_event_time, progress.last_event_version
def _execute(self):
# faulthandler.enable()
self.log.info("Starting the scheduler Job")
# DAGs can be pickled for easier remote execution by some executors
# pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
try:
self.mailbox.set_scheduling_job_id(self.id)
self.mailbox.start()
self.scheduler.id = self.id
self.dag_trigger.start()
self.task_event_manager.start()
self.executor.job_id = self.id
self.periodic_manager.start()
self.register_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
execute_start_time = timezone.utcnow()
self.scheduler.submit_sync_thread()
if self.need_recover_state:
self.scheduler.recover(self.last_scheduling_id)
self._set_event_progress()
self._start_listen_events()
self.executor.start()
self._run_scheduler_loop()
self._stop_listen_events()
self.periodic_manager.shutdown()
self.dag_trigger.end()
self.task_event_manager.end()
self.executor.end()
self.mailbox.stop()
settings.Session.remove() # type: ignore
except Exception as e: # pylint: disable=broad-except
self.log.exception("Exception when executing scheduler, %s", e)
finally:
self.log.info("Exited execute loop")
def _run_scheduler_loop(self) -> None:
self.log.info("Starting the scheduler loop.")
self.scheduler.restore_unfinished_dag_run()
should_continue = True
while should_continue:
try:
should_continue = self.scheduler.schedule()
self.heartbeat(only_if_necessary=True)
except Exception as e:
traceback.print_exc()
self.log.error('Scheduler error [%s]', traceback.format_exc())
time.sleep(1)
self.scheduler.stop_timer()
def _set_event_progress(self):
create_or_update_progress(scheduling_job_id=self.id,
last_event_time=self.start_time,
last_event_version=self.last_event_version)
def _start_listen_events(self):
watcher = SchedulerEventWatcher(self.mailbox)
self.notification_client.start_listen_events(
watcher=watcher,
start_time=self.start_time,
version=self.last_event_version
)
def _stop_listen_events(self):
self.notification_client.stop_listen_events()
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
        :rtype: bool
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
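# --- illustrative sketch (not part of the original module) ---
# The liveness rule in is_alive() reduces to: the job is RUNNING and its
# latest heartbeat is fresher than scheduler_health_check_threshold. A
# standalone version, with a stand-in threshold instead of the config lookup:
from datetime import datetime, timedelta

def _is_scheduler_alive(state, latest_heartbeat, threshold_seconds=30):
    """Return True when a 'running' job heartbeat is within the threshold."""
    return (state == 'running'
            and (datetime.utcnow() - latest_heartbeat).total_seconds() < threshold_seconds)

# _is_scheduler_alive('running', datetime.utcnow() - timedelta(seconds=5))   -> True
# _is_scheduler_alive('running', datetime.utcnow() - timedelta(seconds=120)) -> False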
|
sim.py
|
#-------------------------------------------------------------#
import timeit
import time # Use for time calls
from subprocess import call # Use for turning off the Pi
from multiprocessing import Process
import os
start = timeit.default_timer()
# Creating the function generator
os.chdir("/home/pi/PiBits/ServoBlaster/user")  # changing the directory to access servod
call("sudo ./servod --cycle-time=5000us --max=100% --min=0us", shell=True)  # starting servod with a 5000us cycle
call("pwd", shell=True)  # printing the current directory to make sure we've changed it
time.sleep(0.1)
ServoBlaster = open('/dev/servoblaster', 'w')  # opening servoblaster
def func1():
print('hi')
ServoBlaster.write('P1-16=100us' + '\n')
ServoBlaster.flush()
print('out1')
def func2():
time.sleep(.0006)
print('hi2')
ServoBlaster.write('P1-15=200us' + '\n')
ServoBlaster.flush()
print('out2')
if __name__ == '__main__':
    p1 = Process(target=func1)
    p1.start()
    p2 = Process(target=func2)
    p2.start()
    p1.join()
    p2.join()
time.sleep(.1)
ServoBlaster.write('P1-11=0%' + '\n')
ServoBlaster.flush()
ServoBlaster.write('P1-12=0%' + '\n')
ServoBlaster.flush()
ServoBlaster.write('P1-15=0%' + '\n')
ServoBlaster.flush()
ServoBlaster.write('P1-16=0%' + '\n')
ServoBlaster.flush()
# will not pass 3 inputs simultaneously
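# --- illustrative sketch (not part of the original script) ---
# Every pin update above is the same write-then-flush pair on /dev/servoblaster.
# A hedged refactoring wraps the pattern in one helper; set_servo() is not in
# the original and assumes the same 'PIN=VALUE' command syntax shown above:
def set_servo(dev, pin, value):
    """Send one ServoBlaster command, e.g. set_servo(ServoBlaster, 'P1-16', '0%')."""
    dev.write('%s=%s\n' % (pin, value))
    dev.flush()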
|
image_logger.py
|
#
# This file is part of KwarqsDashboard.
#
# KwarqsDashboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# KwarqsDashboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KwarqsDashboard. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import os.path
import threading
import cv2
import logging
from common import logutil
logger = logging.getLogger(__name__)
class ImageLogger(object):
def __init__(self, prefix, logdir):
self.prefix = prefix
self.logdir = logdir
self.has_image = False
self.do_stop = False
self.condition = threading.Condition()
self.thread = threading.Thread(target=self._log_thread)
def log_image(self, image):
h, w = image.shape[:2]
datestr = datetime.datetime.now().strftime('%Y-%m-%d %H%M-%S-%f')
filename = '%s%s@%sx%s.png' % (self.prefix, datestr, w, h)
filename = os.path.join(self.logdir, filename)
with self.condition:
self.has_image = True
# TODO: does making a copy here matter?
self.img = image.copy()
self.img_filename = filename
self.condition.notify()
def start(self):
if not self.thread.is_alive():
logger.info("Starting %s image logger to %s" % (self.prefix, self.logdir))
self.thread.start()
def stop(self):
with self.condition:
self.do_stop = True
self.condition.notify()
if self.thread.is_alive():
self.thread.join()
@logutil.exception_decorator(logger)
def _log_thread(self):
        while True:
            with self.condition:
                while not self.has_image and not self.do_stop:
                    self.condition.wait()
                # if there's an image queued up, then we want to write it out
                # before exiting; only break once a stop was requested and
                # nothing is queued
                if not self.has_image:
                    break
img = self.img
img_filename = self.img_filename
self.has_image = False
logger.debug('Writing image to %s' % img_filename)
cv2.imwrite(img_filename, img)
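# --- illustrative usage sketch (not part of the original module) ---
# log_image() only copies the frame and notifies the writer thread, so callers
# never block on disk I/O; with the drain behaviour above, stop() writes any
# queued frame before joining. numpy is assumed available (cv2 requires it):
import numpy as np

if __name__ == '__main__':
    il = ImageLogger(prefix='cam-', logdir='/tmp')
    il.start()
    il.log_image(np.zeros((480, 640, 3), dtype=np.uint8))  # stand-in 640x480 frame
    il.stop()  # drains the queued frame to disk, then joins the writer thread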
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
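# Illustrative note (not part of the original file): PdbTestInput swaps
# sys.stdin for a scripted _FakeInput so each doctest below can drive the
# (Pdb) prompt non-interactively, e.g.
#
#     with PdbTestInput(['step', 'continue']):
#         function_that_calls_set_trace()
#
# and on exit it restores the real stdin and any trace function doctest had
# installed.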
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
    ... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
file_content = textwrap.dedent(file_content)
with open(support.TESTFN, 'w') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function('', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bar():
pass
def quux():
pass
""",
'bar',
('bar', 4),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Fail to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
ixnutils.py
|
# ixnutils.py
import asyncio
import datetime
import json
import time
import types
from threading import Lock, Thread
import logging
import snappi
from google.protobuf.any_pb2 import Any
from ..autogen import gnmi_pb2
from snappi import otg_pb2
from .client_session import ClientSession
from .utils import (RequestPathBase, RequestType, get_subscription_type,
get_time_elapsed, gnmi_path_to_string, init_logging)
ATHENA_POLL_INTERVAL = 0.05
IXN_POLL_INTERVAL = 4
POLL_INTERVAL = IXN_POLL_INTERVAL
g_RequestId = -1
def get_request_id():
    global g_RequestId
    g_RequestId += 1
    return g_RequestId
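# --- illustrative aside (not part of the original module) ---
# `g_RequestId += 1` on a module global is not an atomic operation across the
# worker threads this module starts; if strictly unique ids matter under
# concurrency, itertools.count() is a common lock-free alternative (advancing
# it with next() is atomic in CPython). The names below are hypothetical:
import itertools

_request_ids = itertools.count()

def get_request_id_threadsafe():
    """Hypothetical thread-safe drop-in for get_request_id()."""
    return next(_request_ids)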
class SubscriptionReq:
def __init__(self, subscriptionList, session, subscription):
# Assign subscriptionList properties
self.client = session
self.parent_encoding = subscriptionList.encoding
self.parent_mode = subscriptionList.mode
        # Assign subscription item properties
self.uniqueId = get_request_id()
self.mode = subscription.mode
self.gnmipath = subscription.path
self.stringpath, self.name, self.key = gnmi_path_to_string(
subscription
)
self.type = get_subscription_type(self.stringpath)
self.callback, self.deserializer = TestManager.Instance().get_callback(
self.stringpath
)
self.sample_interval = subscription.sample_interval
self.last_polled = None
self.last_yield = None
self.active = False
self.curr_stats = None
self.prev_stats = None
self.delta_stats = None
self.encoded_stats = None
self.availabel_cols = []
self.subscribed_cols = []
self.error = None
def encode_stats(self, stats_name):
def add_header(stats_name, val):
path = gnmi_pb2.Path(elem=[
gnmi_pb2.PathElem(name='val', key={'name': stats_name})
])
update = gnmi_pb2.Update(path=path, val=val)
milliseconds = int(round(time.time() * 1000))
notification = gnmi_pb2.Notification(
timestamp=milliseconds, update=[update])
sub_res = gnmi_pb2.SubscribeResponse(update=notification)
return sub_res
stats = None
if self.mode == gnmi_pb2.SubscriptionMode.ON_CHANGE:
if self.delta_stats is None or len(self.delta_stats) == 0:
self.encoded_stats = None
return
stats = json.dumps(self.delta_stats)
else:
stats = self.curr_stats.serialize()
val = None
if (self.parent_encoding == gnmi_pb2.Encoding.JSON):
val = gnmi_pb2.TypedValue(json_val=stats.encode("utf-8"))
self.encoded_stats = add_header(stats_name, val)
return
if (self.parent_encoding == gnmi_pb2.Encoding.JSON_IETF):
val = gnmi_pb2.TypedValue(json_ietf_val=stats.encode("utf-8"))
self.encoded_stats = add_header(stats_name, val)
return
        if (self.parent_encoding == gnmi_pb2.Encoding.PROTO):
            stats = self.encode_metrics(stats)
            val = gnmi_pb2.TypedValue(any_val=stats)
            self.encoded_stats = add_header(stats_name, val)
            return
def encode_metrics(self, stats_json):
stats = json.loads(stats_json)
metric = self.deserializer.deserialize(stats)
target = Any()
target.Pack(metric)
return target
def compute_delta(self):
delta = {}
if self.curr_stats is None:
self.delta_stats = None
return
if self.prev_stats is None:
for key, curr_value in self.curr_stats._properties.items():
delta[key] = curr_value
self.delta_stats = delta
return
for key, curr_value in self.curr_stats._properties.items():
prev_val = self.prev_stats._get_property(key)
if curr_value != prev_val:
delta[key] = curr_value
self.delta_stats = delta
return
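# --- illustrative sketch (not part of the original module) ---
# compute_delta() keeps only the keys whose value changed between the previous
# and current metric, which is what ON_CHANGE subscriptions stream. The same
# logic, standalone, with a hypothetical stand-in metric object:
class _FakeMetric:
    """Mimics the _properties/_get_property shape compute_delta() relies on."""
    def __init__(self, **props):
        self._properties = props

    def _get_property(self, key):
        return self._properties.get(key)

def _delta(prev, curr):
    """Return only the keys of curr whose value differs from prev."""
    if prev is None:
        return dict(curr._properties)
    return {k: v for k, v in curr._properties.items()
            if v != prev._get_property(k)}

# _delta(_FakeMetric(tx=10, rx=5), _FakeMetric(tx=12, rx=5)) == {'tx': 12}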
class TestManager:
m_instance = None
def __init__(self):
""" Constructor.
"""
if TestManager.m_instance is None:
TestManager.m_instance = self
self.init_once = False
else:
            raise Exception("You cannot create another TestManager instance")
@staticmethod
def Instance():
""" Static method to fetch the current instance.
"""
if not TestManager.m_instance:
TestManager()
return TestManager.m_instance
async def init_once_func(self, options):
api_start = datetime.datetime.now()
try:
try:
if self.init_once is False:
self.app_mode = options.app_mode
self.unittest = options.unittest
self.target_address = options.target_address
log_stdout = not options.no_stdout
self.logger = init_logging(
'gnmi',
'ixutils-TestManager',
options.logfile,
logging.DEBUG,
log_stdout
)
self.profile_logger = init_logging(
'profile',
'ixutils-TestManager',
options.logfile,
logging.DEBUG,
log_stdout
)
self.api = None
self.stopped = False
self.client_sessions = {}
self.port_subscriptions = {}
self.flow_subscriptions = {}
self.neighbor_subscriptions = {}
self.protocol_subscriptions = {}
self.lock = Lock()
self.get_api()
self.start_worker_threads()
self.init_once = True
return self.init_once, None
except Exception as ex:
return self.init_once, str(ex)
return self.init_once, None
finally:
self.profile_logger.info(
"init_once_func completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def get_supported_models(self):
api_start = datetime.datetime.now()
try:
def get_supported_models():
supported_models = []
otg_model = gnmi_pb2.ModelData(
name='open-traffic-generator',
organization='otg',
version=get_version()
)
supported_models.append(otg_model)
return supported_models
def get_supported_encodings():
supported_encodings = []
supported_encodings.append(gnmi_pb2.Encoding.JSON)
supported_encodings.append(gnmi_pb2.Encoding.JSON_IETF)
supported_encodings.append(gnmi_pb2.Encoding.PROTO)
return supported_encodings
def get_version():
return '0.0.1'
cap_response = gnmi_pb2.CapabilityResponse(
supported_models=get_supported_models(),
supported_encodings=get_supported_encodings(),
gNMI_version=get_version()
)
return cap_response
finally:
if hasattr(self, 'profile_logger'):
self.profile_logger.info(
"get_supported_models completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def start_worker_threads(self):
api_start = datetime.datetime.now()
try:
self.logger.info('Starting all collection threads')
self.flow_stats_thread = Thread(
target=self.collect_flow_stats, args=[])
self.flow_stats_thread.start()
self.neighbor_stats_thread = Thread(
target=self.collect_neighbor_states, args=[])
self.neighbor_stats_thread.start()
self.port_stats_thread = Thread(
target=self.collect_port_stats, args=[])
self.port_stats_thread.start()
self.protocol_stats_thread = Thread(
target=self.collect_protocol_stats, args=[])
self.protocol_stats_thread.start()
finally:
self.profile_logger.info(
"start_worker_threads completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def stop_worker_threads(self):
api_start = datetime.datetime.now()
try:
if hasattr(self, 'logger'):
self.logger.info('Stopping all collection threads')
self.stopped = True
if hasattr(self, 'flow_stats_thread'):
self.flow_stats_thread.join()
if hasattr(self, 'neighbor_stats_thread'):
self.neighbor_stats_thread.join()
if hasattr(self, 'port_stats_thread'):
self.port_stats_thread.join()
if hasattr(self, 'protocol_stats_thread'):
self.protocol_stats_thread.join()
finally:
if hasattr(self, 'profile_logger'):
self.profile_logger.info(
"stop_worker_threads completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def terminate(self, request_iterator):
api_start = datetime.datetime.now()
try:
self.logger.info('Terminate connection')
self.stop_worker_threads()
await self.deregister_subscription(request_iterator)
self.dump_all_subscription()
finally:
self.profile_logger.info(
"terminate completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def create_session(self, context, request_iterator):
api_start = datetime.datetime.now()
        try:
            with self.lock:  # released even if parse_requests times out
                session = None
                if context in self.client_sessions:
                    session = self.client_sessions[context]
                else:
                    requests = []
                    await asyncio.wait_for(
                        self.parse_requests(request_iterator, requests),
                        timeout=1.0
                    )
                    session = ClientSession(context, requests)
                    self.client_sessions[context] = session
                    self.logger.info('Created new session %s', context)
            return session
finally:
self.profile_logger.info(
"create_session completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def remove_session(self, context):
api_start = datetime.datetime.now()
        try:
            session = None
            with self.lock:  # released even if pop/logging fails
                if context in self.client_sessions:
                    session = self.client_sessions.pop(context)
                    self.logger.info('Removed session %s', context)
            return session
finally:
self.profile_logger.info(
"remove_session completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def parse_requests(self, request_iterator, requests):
api_start = datetime.datetime.now()
try:
if isinstance(request_iterator, types.GeneratorType):
for request in request_iterator:
requests.append(request)
else:
requests.append(await request_iterator.__anext__())
except Exception as ex:
self.logger.error('Exception: %s', str(ex))
self.logger.error('Exception: ', exc_info=True)
finally:
self.profile_logger.info(
"parse_requests completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_callback(self, path):
api_start = datetime.datetime.now()
try:
if path.find(RequestPathBase.BASE_PORT_PATH) != -1:
return self.get_port_metric, otg_pb2.PortMetric()
if path.find(RequestPathBase.BASE_FLOW_PATH) != -1:
return self.get_flow_metric, otg_pb2.FlowMetric()
if path.find(RequestPathBase.BASE_NEIGHBORv4_PATH) != -1:
return self.get_ipv4_neighbor_state, otg_pb2.Neighborsv4State()
if path.find(RequestPathBase.BASE_NEIGHBORv6_PATH) != -1:
return self.get_ipv6_neighbor_state, otg_pb2.Neighborsv6State()
if path.find(RequestPathBase.BASE_BGPv4_PATH) != -1:
return self.get_bgpv4_metric, otg_pb2.Bgpv4Metric()
if path.find(RequestPathBase.BASE_BGPv6_PATH) != -1:
return self.get_bgpv6_metric, otg_pb2.Bgpv6Metric()
if path.find(RequestPathBase.BASE_ISIS_PATH) != -1:
return self.get_isis_metric, otg_pb2.IsisMetric()
return None
finally:
self.profile_logger.info(
"get_callback completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def collect_stats(self, subscriptions, meta):
api_start = datetime.datetime.now()
try:
self.lock.acquire()
self._collect_stats(subscriptions, meta)
self.lock.release()
finally:
self.profile_logger.info(
"collect_stats completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
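# Batches the names of all subscriptions into one callback call, then fans
# the returned metrics back out so each subscription can diff against its
# previous sample and cache the encoded result for publish_stats().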
def _collect_stats(self, subscriptions, meta):
api_start = datetime.datetime.now()
try:
if len(subscriptions) == 0:
return
names = []
name_to_sub_reverse_map = {}
for key in subscriptions:
sub = subscriptions[key]
sub.error = None
names.append(sub.name)
name_to_sub_reverse_map[sub.name] = sub
# self.logger.info('Collect %s stats for %s', meta, names)
try:
metrics = sub.callback(names)
# self.logger.info('Collected %s stats for %s', meta, metrics)
for metric in metrics:
key = getattr(metric, sub.key)
if key not in name_to_sub_reverse_map:
continue
sub = name_to_sub_reverse_map[key]
sub.prev_stats = sub.curr_stats
sub.curr_stats = metric
sub.compute_delta()
sub.encode_stats(key)
except Exception as ex:
for key in subscriptions:
subscriptions[key].error = str(ex)
except Exception:
self.logger.error(
"Fatal error in collecting stats for %s: names:%s",
meta,
names
)
self.logger.error("Fatal error: ", exc_info=True)
finally:
self.profile_logger.info(
"_collect_stats completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def collect_flow_stats(self):
api_start = datetime.datetime.now()
try:
global POLL_INTERVAL
self.logger.info('Started flow stats collection thread')
while self.stopped is False:
if len(self.flow_subscriptions) > 0:
self.collect_stats(self.flow_subscriptions, 'Flow')
time.sleep(POLL_INTERVAL)
finally:
self.profile_logger.info(
"collect_flow_stats completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def collect_neighbor_states(self):
api_start = datetime.datetime.now()
try:
global POLL_INTERVAL
self.logger.info('Started neighbor states collection thread')
while self.stopped is False:
if len(self.neighbor_subscriptions) > 0:
self.collect_stats(self.neighbor_subscriptions, 'Neighbor')
time.sleep(POLL_INTERVAL)
finally:
self.profile_logger.info(
"collect_neighbor_states completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def collect_port_stats(self):
api_start = datetime.datetime.now()
try:
global POLL_INTERVAL
self.logger.info('Started port stats collection thread')
while self.stopped is False:
if len(self.port_subscriptions) > 0:
self.collect_stats(self.port_subscriptions, 'Port')
time.sleep(POLL_INTERVAL)
finally:
self.profile_logger.info(
"collect_port_stats completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def collect_protocol_stats(self):
api_start = datetime.datetime.now()
try:
global POLL_INTERVAL
time.sleep(POLL_INTERVAL)
self.logger.info('Started protocol stats collection thread')
while self.stopped is False:
if len(self.protocol_subscriptions) > 0:
self.collect_stats(self.protocol_subscriptions, 'Protocol')
time.sleep(POLL_INTERVAL)
finally:
self.profile_logger.info(
"collect_protocol_stats completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_api(self):
api_start = datetime.datetime.now()
try:
if self.init_once:
return self.api
target = None
if self.unittest:
target = "http://{}".format('127.0.0.1:11020')
else:
target = "https://{}".format(self.target_address)
self.logger.info(
'Initializing snappi for %s at target %s',
self.app_mode,
target
)
# when using ixnetwork extension, host is IxNetwork API Server
if self.app_mode == 'ixnetwork':
self.api = snappi.api(location=target, ext='ixnetwork')
else:
global POLL_INTERVAL
POLL_INTERVAL = ATHENA_POLL_INTERVAL
self.api = snappi.api(location=target)
self.logger.info('Initialized snappi...')
return self.api
finally:
self.profile_logger.info(
"get_api completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_flow_metric(self, flow_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.metrics_request()
req.choice = "flow"
req.flow.flow_names = flow_names
res = api.get_metrics(req)
return res.flow_metrics
finally:
self.profile_logger.info(
"get_flow_metric completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_port_metric(self, port_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.metrics_request()
req.choice = "port"
req.port.port_names = port_names
res = api.get_metrics(req)
return res.port_metrics
finally:
self.profile_logger.info(
"get_port_metric completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_bgpv6_metric(self, peer_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.metrics_request()
req.choice = "bgpv6"
req.bgpv6.peer_names = peer_names
res = api.get_metrics(req)
return res.bgpv6_metrics
finally:
self.profile_logger.info(
"get_bgpv6_metric completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_bgpv4_metric(self, peer_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.metrics_request()
req.choice = "bgpv4"
req.bgpv4.peer_names = peer_names
res = api.get_metrics(req)
return res.bgpv4_metrics
finally:
self.profile_logger.info(
"get_bgpv4_metric completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_isis_metric(self, router_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.metrics_request()
req.choice = "isis"
req.isis.router_names = router_names
res = api.get_metrics(req)
return res.isis_metrics
finally:
self.profile_logger.info(
"get_isis_metric completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_ipv4_neighbor_state(self, eth_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.states_request()
req.choice = "ipv4_neighbors"
req.ipv4_neighbors.ethernet_names = eth_names
res = api.get_states(req)
return res.ipv4_neighbors
finally:
self.profile_logger.info(
"get_ipv4_neighbor_state completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def get_ipv6_neighbor_state(self, eth_names, stat_names=None):
api_start = datetime.datetime.now()
try:
api = self.get_api()
req = api.states_request()
req.choice = "ipv6_neighbors"
req.ipv6_neighbors.ethernet_names = eth_names
res = api.get_states(req)
return res.ipv6_neighbors
finally:
self.profile_logger.info(
"get_ipv6_neighbor_state completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
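# Wraps already-encoded stats in a gNMI SubscribeResponse: the encoding the
# client asked for (JSON, JSON_IETF or PROTO) picks the TypedValue field, and
# the notification is stamped with the current wall clock in milliseconds.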
def create_update_response(self, encoding, stats_name, stats):
api_start = datetime.datetime.now()
try:
val = None
if encoding == gnmi_pb2.Encoding.JSON:
val = gnmi_pb2.TypedValue(json_val=stats.encode("utf-8"))
if encoding == gnmi_pb2.Encoding.JSON_IETF:
val = gnmi_pb2.TypedValue(json_ietf_val=stats.encode("utf-8"))
if encoding == gnmi_pb2.Encoding.PROTO:
val = gnmi_pb2.TypedValue(any_val=stats)
path = gnmi_pb2.Path(elem=[
gnmi_pb2.PathElem(name='val', key={'name': stats_name})
])
update = gnmi_pb2.Update(path=path, val=val)
milliseconds = int(round(time.time() * 1000))
notification = gnmi_pb2.Notification(
timestamp=milliseconds, update=[update])
sub_res = gnmi_pb2.SubscribeResponse(update=notification)
return sub_res
finally:
self.profile_logger.info(
"create_update_response completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def encode_sync(self):
api_start = datetime.datetime.now()
try:
sync_resp = gnmi_pb2.SubscribeResponse(sync_response=True)
return sync_resp
finally:
self.profile_logger.info(
"encode_sync completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def create_error_response(self, stats_name, error_message):
api_start = datetime.datetime.now()
try:
# err = gnmi_pb2.Error(data=stats_name, message=error_message)
err = gnmi_pb2.Error(message=stats_name + ': ' + error_message)
err_res = gnmi_pb2.SubscribeResponse(error=err)
return err_res
finally:
self.profile_logger.info(
"create_error_response completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
def dump_all_subscription(self):
api_start = datetime.datetime.now()
try:
self.logger.info(
'Port Subscriptions: total subscription = %s', len(
self.port_subscriptions))
for path in self.port_subscriptions:
sub = self.port_subscriptions[path]
self.logger.info(
'\t\tSubscriptions: %s, [Key: %s, Name: %s]',
path,
sub.key,
sub.name
)
self.logger.info(
'Flow Subscriptions: total subscription = %s', len(
self.flow_subscriptions)
)
for path in self.flow_subscriptions:
sub = self.flow_subscriptions[path]
self.logger.info(
'\t\tSubscriptions: %s, [Key: %s, Name: %s]',
path,
sub.key,
sub.name
)
self.logger.info(
'Neighbor Subscriptions: total subscription = %s', len(
self.neighbor_subscriptions))
for path in self.neighbor_subscriptions:
sub = self.neighbor_subscriptions[path]
self.logger.info(
'\t\tSubscriptions: %s, [Key: %s, Name: %s]',
path,
sub.key,
sub.name
)
self.logger.info(
'Protocol Subscriptions: total subscription = %s',
len(self.protocol_subscriptions)
)
for path in self.protocol_subscriptions:
sub = self.protocol_subscriptions[path]
self.logger.info(
'\t\tSubscriptions: %s, [Key: %s, Name: %s]',
path,
sub.key,
sub.name
)
finally:
self.profile_logger.info(
"dump_all_subscription completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
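# Walks every SubscribeRequest in the session and files each subscription
# into the per-type map (port/flow/neighbor/protocol) that the matching
# collection thread polls.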
async def register_subscription(self, session):
api_start = datetime.datetime.now()
try:
self.lock.acquire()
self.logger.info(
'Register Subscription for %s elements', len(session.requests))
try:
for request in session.requests:
if request is None:
continue
session.mode = request.subscribe.mode
for subscription in request.subscribe.subscription:
sub = SubscriptionReq(
request.subscribe, session, subscription)
sub.client.register_path(sub.stringpath)
sub.encoding = request.subscribe.encoding
self.logger.info(
'Register Subscription %s', sub.stringpath)
if sub.type == RequestType.PORT:
self.port_subscriptions[sub.stringpath] = sub
elif sub.type == RequestType.FLOW:
self.flow_subscriptions[sub.stringpath] = sub
elif sub.type == RequestType.NEIGHBOR:
self.neighbor_subscriptions[sub.stringpath] = sub
elif sub.type == RequestType.PROTOCOL:
self.protocol_subscriptions[sub.stringpath] = sub
else:
self.logger.info(
'Unknown Subscription %s', sub.stringpath)
except Exception as ex:
self.logger.error('Exception: %s', str(ex))
self.logger.error('Exception: ', exc_info=True)
self.dump_all_subscription()
self.lock.release()
finally:
self.profile_logger.info(
"register_subscription completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def deregister_subscription(self, session):
api_start = datetime.datetime.now()
try:
self.lock.acquire()
self.logger.info(
'Deregister Subscription for %s elements', len(
session.requests))
try:
for request in session.requests:
if request is None:
continue
session.mode = request.subscribe.mode
for subscription in request.subscribe.subscription:
sub = SubscriptionReq(
request.subscribe, session, subscription)
sub.client.deregister_path(sub.stringpath)
self.logger.info(
'Deregister Subscription %s', sub.stringpath)
if sub.type == RequestType.PORT:
self.port_subscriptions.pop(sub.stringpath)
elif sub.type == RequestType.FLOW:
self.flow_subscriptions.pop(sub.stringpath)
elif sub.type == RequestType.NEIGHBOR:
self.neighbor_subscriptions.pop(sub.stringpath)
elif sub.type == RequestType.PROTOCOL:
self.protocol_subscriptions.pop(sub.stringpath)
except Exception as ex:
self.logger.error('Exception: %s', str(ex))
self.logger.error('Exception: ', exc_info=True)
self.dump_all_subscription()
self.lock.release()
# self.stop_worker_threads()
finally:
self.profile_logger.info(
"deregister_subscription completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
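# Drains the cached, pre-encoded stats of every subscription map into one
# response list, emitting per-subscription errors inline and appending a
# sync_response marker when the session's initial sync is due.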
async def publish_stats(self, session):
api_start = datetime.datetime.now()
try:
results = []
def publish(key, subscriptions, session, res, meta=None):
# self.logger.info('Publish %s Stats %s', meta, key)
sub = subscriptions[key]
if sub.error is not None:
res.append(self.create_error_response(
sub.name, sub.error))
return
if sub.encoded_stats is not None:
res.append(sub.encoded_stats)
sub.client.update_stats(key)
self.lock.acquire()
for key in self.port_subscriptions:
publish(key, self.port_subscriptions, session, results, 'Port')
for key in self.flow_subscriptions:
publish(key, self.flow_subscriptions, session, results, 'Flow')
for key in self.neighbor_subscriptions:
publish(
key,
self.neighbor_subscriptions,
session,
results,
'Neighbor'
)
for key in self.protocol_subscriptions:
publish(key, self.protocol_subscriptions,
session, results, 'Protocol')
self.lock.release()
if session.send_sync():
results.append(self.encode_sync())
return results
finally:
self.profile_logger.info(
"publish_stats completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
async def keep_polling(self):
api_start = datetime.datetime.now()
try:
return self.stopped is False
finally:
self.profile_logger.info(
"keep_polling completed!", extra={
'nanoseconds': get_time_elapsed(api_start)
}
)
# if __name__ == '__main__':
# setup_test()
|
dump_frames.py
|
import time
import signal
import json
from os import makedirs, listdir
from os.path import exists, join, isfile, basename
from shutil import rmtree
from argparse import ArgumentParser
from multiprocessing import Process, Queue
import cv2
from tqdm import tqdm
def ensure_dir_exists(dir_path):
if not exists(dir_path):
makedirs(dir_path)
def get_valid_sources(all_sources):
return [s for s in all_sources if exists(s)]
def print_data_sources_stat(data_sources):
print('Specified {} valid data sources:'.format(len(data_sources)))
for data_source in data_sources:
print(' - {}'.format(data_source))
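# Each source file maps record ids to entries carrying a video `url` and an
# `annotations.label` (a Kinetics-style layout, judging by the fields); the
# YouTube id after '?v=' keys the result and duplicates are dropped.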
def collect_videos(data_sources):
out_videos = dict()
num_duplicated_videos = 0
for data_source in data_sources:
data_type = basename(data_source).replace('.json', '')
with open(data_source) as input_stream:
data = json.load(input_stream)
for record in data.values():
url = record['url']
video_name = url.split('?v=')[-1]
label = record['annotations']['label']
if video_name in out_videos:
num_duplicated_videos += 1
else:
out_videos[video_name] = data_type, label
if num_duplicated_videos > 0:
print('Num duplicated videos: {}'.format(num_duplicated_videos))
return out_videos
def filter_available(videos, videos_dir, extension):
downloaded_video_names = [f.replace('.{}'.format(extension), '')
for f in listdir(videos_dir) if isfile(join(videos_dir, f)) and f.endswith(extension)]
all_video_names = list(videos)
valid_video_names = list(set(all_video_names).intersection(set(downloaded_video_names)))
filtered_videos = {video_name: videos[video_name] for video_name in valid_video_names}
return filtered_videos
def parse_completed_tasks(out_dir):
candidate_files = [join(out_dir, f) for f in listdir(out_dir) if isfile(join(out_dir, f)) and f.endswith('.txt')]
completed_tasks = []
for candidate_file in candidate_files:
with open(candidate_file) as input_stream:
for line in input_stream:
values = line.strip().split(' ')
if len(values) == 3:
completed_tasks.append(values[0])
return completed_tasks
def exclude_completed(videos, completed_names):
all_video_names = list(videos)
valid_video_names = list(set(all_video_names) - set(completed_names))
filtered_videos = {video_name: videos[video_name] for video_name in valid_video_names}
return filtered_videos
def make_class_map(videos):
class_names = list(set([label_name for _, label_name in videos.values()]))
class_map = {n: i for i, n in enumerate(class_names)}
return class_map
def prepare_tasks(videos, videos_dir, video_extension, images_dir, class_map):
out_tasks_queue = Queue()
num_tasks = 0
for video_name, video_desc in videos.items():
video_frames_dir = join(images_dir, video_name)
label = class_map[video_desc[1]]
video_path = join(videos_dir, '{}.{}'.format(video_name, video_extension))
task = dict(name=video_name,
type=video_desc[0],
label=label,
video_path=video_path,
frames_path=video_frames_dir)
out_tasks_queue.put(task, True)
num_tasks += 1
return out_tasks_queue, num_tasks
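# Decodes the video with OpenCV, keeps every `frame_rate`-th frame, resizes
# so the longer side is at most `image_scale` pixels, and writes the frames
# out as sequentially numbered images.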
def dump_frames(task, image_name_template, image_scale, frame_rate=3):
video_capture = cv2.VideoCapture(task['video_path'])
frame_count = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
if frame_count <= 0:  # int() never yields None, so only guard non-positive counts
return 0
video_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
max_side_size = max(video_width, video_height)
if max_side_size > image_scale:
scale = float(image_scale) / float(max_side_size)
trg_height, trg_width = int(video_height * scale), int(video_width * scale)
else:
trg_height, trg_width = int(video_height), int(video_width)
ensure_dir_exists(task['frames_path'])
num_read_frames = 0
for frame_id in range(frame_count):
success, frame = video_capture.read()
if not success:
break
if frame_id % frame_rate != 0:
continue
resized_image = cv2.resize(frame, (trg_width, trg_height))
out_image_path = join(task['frames_path'], image_name_template.format(num_read_frames + 1))
cv2.imwrite(out_image_path, resized_image)
num_read_frames += 1
video_capture.release()
return num_read_frames
def remove_if_exists(dir_path):
if exists(dir_path):
rmtree(dir_path)
def annotation_writer(completed_tasks_queue, out_streams, out_dir, pbar):
signal.signal(signal.SIGINT, signal.SIG_IGN)
while True:
if not completed_tasks_queue.empty():
is_valid, data_type, rel_path, num_frames, label = completed_tasks_queue.get(True)
if not is_valid:
pbar.update(1)
continue
if data_type not in out_streams:
out_streams[data_type] = open(join(out_dir, '{}.txt'.format(data_type)), 'a')
converted_record = (
str(rel_path),
str(num_frames),
str(label)
)
out_streams[data_type].write('{}\n'.format(' '.join(converted_record)))
out_streams[data_type].flush()
pbar.update(1)
else:
time.sleep(1)
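# Worker process: pulls tasks until the shared queue is empty, dumps frames,
# discards clips shorter than `min_length`, and reports every outcome to the
# annotation writer through `completed_tasks_queue`.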
def frame_writer(input_tasks_queue, completed_tasks_queue, min_length, image_name_template, image_scale):
signal.signal(signal.SIGINT, signal.SIG_IGN)
while not input_tasks_queue.empty():
task = input_tasks_queue.get(True)
num_frames = dump_frames(task, image_name_template, image_scale)
is_valid = num_frames >= min_length
if not is_valid:
remove_if_exists(task['frames_path'])
completed_task = is_valid, task['type'], task['name'], num_frames, task['label']
completed_tasks_queue.put(completed_task, True)
print('Frame writer process finished.')
def main():
parser = ArgumentParser()
parser.add_argument('--sources', '-s', nargs='+', type=str, required=True)
parser.add_argument('--videos_dir', '-v', type=str, required=True)
parser.add_argument('--output_dir', '-o', type=str, required=True)
parser.add_argument('--video_extension', '-ve', type=str, required=False, default='mp4')
parser.add_argument('--image_extension', '-ie', type=str, required=False, default='jpg')
parser.add_argument('--image_scale', '-is', type=int, required=False, default=256)
parser.add_argument('--min_length', '-ml', type=int, required=False, default=16)
parser.add_argument('--num_threads', '-n', type=int, required=False, default=8)
args = parser.parse_args()
assert exists(args.videos_dir)
assert args.num_threads >= 1
out_images_dir = join(args.output_dir, 'rawframes')
ensure_dir_exists(out_images_dir)
data_sources = get_valid_sources(args.sources)
print_data_sources_stat(data_sources)
assert len(data_sources) > 0
all_videos = collect_videos(data_sources)
filtered_videos = filter_available(all_videos, args.videos_dir, args.video_extension)
print('Available {} / {} videos'.format(len(filtered_videos), len(all_videos)))
completed_task_names = parse_completed_tasks(args.output_dir)
if len(completed_task_names) > 0:
filtered_videos = exclude_completed(filtered_videos, completed_task_names)
print('Found {} completed tasks'.format(len(completed_task_names)))
class_map = make_class_map(all_videos)
print('Found {} unique classes'.format(len(class_map)))
tasks_queue, num_tasks = prepare_tasks(filtered_videos, args.videos_dir, args.video_extension,
out_images_dir, class_map)
print('Prepared {} tasks'.format(num_tasks))
image_name_template = 'img_{:05}' + '.{}'.format(args.image_extension)
out_streams = dict()
completed_tasks_queue = Queue()
pbar = tqdm(total=num_tasks)
annotation_writer_process = Process(
target=annotation_writer, args=(completed_tasks_queue, out_streams, args.output_dir, pbar))
annotation_writer_process.daemon = True
annotation_writer_process.start()
frame_writers_pool = []
for _ in range(args.num_threads):
frame_writer_process = Process(target=frame_writer,
args=(tasks_queue, completed_tasks_queue, args.min_length,
image_name_template, args.image_scale))
frame_writer_process.daemon = True
frame_writer_process.start()
frame_writers_pool.append(frame_writer_process)
for frame_writer_process in frame_writers_pool:
frame_writer_process.join()
# annotation_writer loops forever, so join() would block indefinitely;
# let it drain the remaining results, then stop it explicitly.
while not completed_tasks_queue.empty():
    time.sleep(1)
annotation_writer_process.terminate()
if __name__ == '__main__':
main()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test vadercoind shutdown."""
from test_framework.test_framework import VadercoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(VadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
safaribooks.py
|
#!/usr/bin/env python3
# coding: utf-8
import pathlib
import re
import os
import sys
import json
import shutil
import getpass
import logging
import argparse
import requests
import traceback
from html import escape
from random import random
from lxml import html, etree
from multiprocessing import Process, Queue, Value
from urllib.parse import urljoin, urlparse, parse_qs, quote_plus
import streamlit as st
import base64
PATH = os.path.dirname(os.path.realpath(__file__))
COOKIES_FILE = os.path.join(PATH, "cookies.json")
ORLY_BASE_HOST = "oreilly.com" # PLEASE INSERT URL HERE
SAFARI_BASE_HOST = "learning." + ORLY_BASE_HOST
API_ORIGIN_HOST = "api." + ORLY_BASE_HOST
ORLY_BASE_URL = "https://www." + ORLY_BASE_HOST
SAFARI_BASE_URL = "https://" + SAFARI_BASE_HOST
API_ORIGIN_URL = "https://" + API_ORIGIN_HOST
PROFILE_URL = SAFARI_BASE_URL + "/profile/"
# DEBUG
USE_PROXY = False
PROXIES = {"https": "https://127.0.0.1:8080"}
class Display:
BASE_FORMAT = logging.Formatter(
fmt="[%(asctime)s] %(message)s",
datefmt="%d/%b/%Y %H:%M:%S"
)
SH_DEFAULT = "\033[0m" if "win" not in sys.platform else "" # TODO: colors for Windows
SH_YELLOW = "\033[33m" if "win" not in sys.platform else ""
SH_BG_RED = "\033[41m" if "win" not in sys.platform else ""
SH_BG_YELLOW = "\033[43m" if "win" not in sys.platform else ""
def __init__(self, log_file):
self.output_dir = ""
self.output_dir_set = False
self.log_file = os.path.join(PATH, log_file)
self.logger = logging.getLogger("SafariBooks")
self.logger.setLevel(logging.INFO)
logs_handler = logging.FileHandler(filename=self.log_file)
logs_handler.setFormatter(self.BASE_FORMAT)
logs_handler.setLevel(logging.INFO)
self.logger.addHandler(logs_handler)
self.columns, _ = shutil.get_terminal_size()
self.logger.info("** Welcome to SafariBooks! **")
self.book_ad_info = False
self.css_ad_info = Value("i", 0)
self.images_ad_info = Value("i", 0)
self.last_request = (None,)
self.in_error = False
self.state_status = Value("i", 0)
sys.excepthook = self.unhandled_exception
def set_output_dir(self, output_dir):
self.info("Output directory:\n %s" % output_dir)
self.output_dir = output_dir
self.output_dir_set = True
def unregister(self):
self.logger.handlers[0].close()
sys.excepthook = sys.__excepthook__
def log(self, message):
try:
self.logger.info(str(message, "utf-8", "replace"))
except (UnicodeDecodeError, Exception):
self.logger.info(message)
def out(self, put):
pattern = "\r{!s}\r{!s}\n"
try:
s = pattern.format(" " * self.columns, str(put, "utf-8", "replace"))
except TypeError:
s = pattern.format(" " * self.columns, put)
sys.stdout.write(s)
def info(self, message, state=False):
self.log(message)
output = (self.SH_YELLOW + "[*]" + self.SH_DEFAULT if not state else
self.SH_BG_YELLOW + "[-]" + self.SH_DEFAULT) + " %s" % message
self.out(output)
def error(self, error):
if not self.in_error:
self.in_error = True
self.log(error)
output = self.SH_BG_RED + "[#]" + self.SH_DEFAULT + " %s" % error
self.out(output)
def exit(self, error):
self.error(str(error))
if self.output_dir_set:
output = (self.SH_YELLOW + "[+]" + self.SH_DEFAULT +
" Please delete the output directory '" + self.output_dir + "'"
" and restart the program.")
self.out(output)
output = self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Aborting..."
self.out(output)
self.save_last_request()
st.write(error)
st.stop()
sys.exit(1)
def unhandled_exception(self, _, o, tb):
self.log("".join(traceback.format_tb(tb)))
self.exit("Unhandled Exception: %s (type: %s)" % (o, o.__class__.__name__))
def save_last_request(self):
if any(self.last_request):
self.log("Last request done:\n\tURL: {0}\n\tDATA: {1}\n\tOTHERS: {2}\n\n\t{3}\n{4}\n\n{5}\n"
.format(*self.last_request))
def intro(self):
output = self.SH_YELLOW + ("""
____ ___ _
/ __/__ _/ _/__ _____(_)
_\ \/ _ `/ _/ _ `/ __/ /
/___/\_,_/_/ \_,_/_/ /_/
/ _ )___ ___ / /__ ___
/ _ / _ \/ _ \/ '_/(_-<
/____/\___/\___/_/\_\/___/
""" if random() > 0.5 else """
██████╗ ██████╗ ██╗ ██╗ ██╗██████╗
██╔═══██╗ ██╔══██╗██║ ╚██╗ ██╔╝╚════██╗
██║ ██║ ██████╔╝██║ ╚████╔╝ ▄███╔╝
██║ ██║ ██╔══██╗██║ ╚██╔╝ ▀▀══╝
╚██████╔╝ ██║ ██║███████╗██║ ██╗
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
""") + self.SH_DEFAULT
output += "\n" + "~" * (self.columns // 2)
self.out(output)
def parse_description(self, desc):
if not desc:
return "n/d"
try:
return html.fromstring(desc).text_content()
except (html.etree.ParseError, html.etree.ParserError) as e:
self.log("Error parsing the description: %s" % e)
return "n/d"
def book_info(self, info):
description = self.parse_description(info["description"]).replace("\n", " ")
for t in [
("Title", info["title"]), ("Authors", ", ".join(aut["name"] for aut in info["authors"])),
("Identifier", info["identifier"]), ("ISBN", info["isbn"]),
("Publishers", ", ".join(pub["name"] for pub in info["publishers"])),
("Rights", info["rights"]),
("Description", description[:500] + "..." if len(description) >= 500 else description),
("Release Date", info["issued"]),
("URL", info["web_url"])
]:
self.info("{0}{1}{2}: {3}".format(self.SH_YELLOW, t[0], self.SH_DEFAULT, t[1]), True)
def state(self, origin, done):
progress = int(done * 100 / origin)
bar = int(progress * (self.columns - 11) / 100)
if self.state_status.value < progress:
self.my_bar.progress(progress)
self.state_status.value = progress
sys.stdout.write(
"\r " + self.SH_BG_YELLOW + "[" + ("#" * bar).ljust(self.columns - 11, "-") + "]" +
self.SH_DEFAULT + ("%4s" % progress) + "%" + ("\n" if progress == 100 else "")
)
def done(self, epub_file):
self.info("Done: %s\n\n" % epub_file +
" If you like it, please * this project on GitHub to make it known:\n"
" https://github.com/lorenzodifuccia/safaribooks\n"
" e don't forget to renew your Safari Books Online subscription:\n"
" " + SAFARI_BASE_URL + "\n\n" +
self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Bye!!")
@staticmethod
def api_error(response):
message = "API: "
if "detail" in response and "Not found" in response["detail"]:
message += "book's not present in Safari Books Online.\n" \
" The book identifier is the digits that you can find in the URL:\n" \
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
else:
os.remove(COOKIES_FILE)
message += "Out-of-Session%s.\n" % (" (%s)" % response["detail"]) if "detail" in response else "" + \
Display.SH_YELLOW + "[+]" + Display.SH_DEFAULT + \
" Use the `--cred` or `--login` options in order to perform the auth login to Safari."
return message
class WinQueue(list): # TODO: error while use `process` in Windows: can't pickle _thread.RLock objects
def put(self, el):
self.append(el)
def qsize(self):
return self.__len__()
class SafariBooks:
LOGIN_URL = ORLY_BASE_URL + "/member/auth/login/"
LOGIN_ENTRY_URL = SAFARI_BASE_URL + "/login/unified/?next=/home/"
API_TEMPLATE = SAFARI_BASE_URL + "/api/v1/book/{0}/"
BASE_01_HTML = "<!DOCTYPE html>\n" \
"<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"" \
" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" \
" xsi:schemaLocation=\"http://www.w3.org/2002/06/xhtml2/" \
" http://www.w3.org/MarkUp/SCHEMA/xhtml2.xsd\"" \
" xmlns:epub=\"http://www.idpf.org/2007/ops\">\n" \
"<head>\n" \
"{0}\n" \
"<style type=\"text/css\">" \
"body{{margin:1em;background-color:transparent!important;}}" \
"#sbo-rt-content *{{text-indent:0pt!important;}}#sbo-rt-content .bq{{margin-right:1em!important;}}"
KINDLE_HTML = "#sbo-rt-content *{{word-wrap:break-word!important;" \
"word-break:break-word!important;}}#sbo-rt-content table,#sbo-rt-content pre" \
"{{overflow-x:unset!important;overflow:unset!important;" \
"overflow-y:unset!important;white-space:pre-wrap!important;}}"
BASE_02_HTML = "</style>" \
"</head>\n" \
"<body>{1}</body>\n</html>"
CONTAINER_XML = "<?xml version=\"1.0\"?>" \
"<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">" \
"<rootfiles>" \
"<rootfile full-path=\"OEBPS/content.opf\" media-type=\"application/oebps-package+xml\" />" \
"</rootfiles>" \
"</container>"
# Format: ID, Title, Authors, Description, Subjects, Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl
CONTENT_OPF = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" \
"<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"bookid\" version=\"2.0\" >\n" \
"<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " \
" xmlns:opf=\"http://www.idpf.org/2007/opf\">\n" \
"<dc:title>{1}</dc:title>\n" \
"{2}\n" \
"<dc:description>{3}</dc:description>\n" \
"{4}" \
"<dc:publisher>{5}</dc:publisher>\n" \
"<dc:rights>{6}</dc:rights>\n" \
"<dc:language>en-US</dc:language>\n" \
"<dc:date>{7}</dc:date>\n" \
"<dc:identifier id=\"bookid\">{0}</dc:identifier>\n" \
"<meta name=\"cover\" content=\"{8}\"/>\n" \
"</metadata>\n" \
"<manifest>\n" \
"<item id=\"ncx\" href=\"toc.ncx\" media-type=\"application/x-dtbncx+xml\" />\n" \
"{9}\n" \
"</manifest>\n" \
"<spine toc=\"ncx\">\n{10}</spine>\n" \
"<guide><reference href=\"{11}\" title=\"Cover\" type=\"cover\" /></guide>\n" \
"</package>"
# Format: ID, Depth, Title, Author, NAVMAP
TOC_NCX = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\n" \
"<!DOCTYPE ncx PUBLIC \"-//NISO//DTD ncx 2005-1//EN\"" \
" \"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd\">\n" \
"<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">\n" \
"<head>\n" \
"<meta content=\"ID:ISBN:{0}\" name=\"dtb:uid\"/>\n" \
"<meta content=\"{1}\" name=\"dtb:depth\"/>\n" \
"<meta content=\"0\" name=\"dtb:totalPageCount\"/>\n" \
"<meta content=\"0\" name=\"dtb:maxPageNumber\"/>\n" \
"</head>\n" \
"<docTitle><text>{2}</text></docTitle>\n" \
"<docAuthor><text>{3}</text></docAuthor>\n" \
"<navMap>{4}</navMap>\n" \
"</ncx>"
HEADERS = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Referer": LOGIN_ENTRY_URL,
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/80.0.3987.163 Safari/537.36"
}
COOKIE_FLOAT_MAX_AGE_PATTERN = re.compile(r'(max-age=\d*\.\d*)', re.IGNORECASE)
def __init__(self, args):
self.args = args
#self.display = Display("info_%s.log" % escape(args.bookid))
self.display = Display("info_%s.log" % escape('12343'))
#self.display = Display("info.log")
self.display.intro()
self.session = requests.Session()
if USE_PROXY: # DEBUG
self.session.proxies = PROXIES
self.session.verify = False
self.session.headers.update(self.HEADERS)
self.jwt = {}
if not args.cred:
if not os.path.isfile(COOKIES_FILE):
self.display.exit("Login: unable to find `cookies.json` file.\n"
" Please use the `--cred` or `--login` options to perform the login.")
self.session.cookies.update(json.load(open(COOKIES_FILE)))
else:
self.display.info("Logging into Safari Books Online...", state=True)
self.do_login(*args.cred)
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, 'w'))
self.check_login()
def descargar_libro(self, args):
self.book_id = args.bookid
self.api_url = self.API_TEMPLATE.format(self.book_id)
self.display.info("Retrieving book info...")
self.book_info = self.get_book_info()
self.display.book_info(self.book_info)
self.display.info("Retrieving book chapters...")
self.book_chapters = self.get_book_chapters()
self.chapters_queue = self.book_chapters[:]
if len(self.book_chapters) > sys.getrecursionlimit():
sys.setrecursionlimit(len(self.book_chapters))
self.book_title = self.book_info["title"]
self.base_url = self.book_info["web_url"]
self.clean_book_title = "".join(self.escape_dirname(self.book_title).split(",")[:2]) \
+ " ({0})".format(self.book_id)
books_dir = os.path.join(PATH, "Books")
if not os.path.isdir(books_dir):
os.mkdir(books_dir)
self.BOOK_PATH = os.path.join(books_dir, self.clean_book_title)
self.display.set_output_dir(self.BOOK_PATH)
self.css_path = ""
self.images_path = ""
self.create_dirs()
self.chapter_title = ""
self.filename = ""
self.chapter_stylesheets = []
self.css = []
self.images = []
self.display.info("Downloading book contents... (%s chapters)" % len(self.book_chapters), state=True)
self.BASE_HTML = self.BASE_01_HTML + (self.KINDLE_HTML if not args.kindle else "") + self.BASE_02_HTML
st.write("Descargando el contenido de los libros...")
self.cover = False
self.get()
if not self.cover:
self.cover = self.get_default_cover()
cover_html = self.parse_html(
html.fromstring("<div id=\"sbo-rt-content\"><img src=\"Images/{0}\"></div>".format(self.cover)), True
)
self.book_chapters = [{
"filename": "default_cover.xhtml",
"title": "Cover"
}] + self.book_chapters
self.filename = self.book_chapters[0]["filename"]
self.save_page_html(cover_html)
self.css_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book CSSs... (%s files)" % len(self.css), state=True)
st.write("Descargando CSS de los libros...")
self.collect_css()
self.images_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book images... (%s files)" % len(self.images), state=True)
st.write("Descargando las imagenes de los libros...")
self.collect_images()
self.display.info("Creating EPUB file...", state=True)
st.write("Creando EPUB...")
self.create_epub()
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, "w"))
self.display.done(os.path.join(self.BOOK_PATH, self.book_id + ".epub"))
self.display.unregister()
if not self.display.in_error and not args.log:
os.remove(self.display.log_file)
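# Works around Set-Cookie headers carrying a float `max-age` (which the
# standard cookie jar fails to parse) by re-setting those cookies manually.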
def handle_cookie_update(self, set_cookie_headers):
for morsel in set_cookie_headers:
# Handle Float 'max-age' Cookie
if self.COOKIE_FLOAT_MAX_AGE_PATTERN.search(morsel):
cookie_key, cookie_value = morsel.split(";")[0].split("=")
self.session.cookies.set(cookie_key, cookie_value)
def requests_provider(self, url, is_post=False, data=None, perform_redirect=True, **kwargs):
try:
response = getattr(self.session, "post" if is_post else "get")(
url,
data=data,
allow_redirects=False,
**kwargs
)
self.handle_cookie_update(response.raw.headers.getlist("Set-Cookie"))
self.display.last_request = (
url, data, kwargs, response.status_code, "\n".join(
["\t{}: {}".format(*h) for h in response.headers.items()]
), response.text
)
except (requests.ConnectionError, requests.ConnectTimeout, requests.RequestException) as request_exception:
self.display.error(str(request_exception))
return 0
if response.is_redirect and perform_redirect:
return self.requests_provider(response.next.url, is_post, None, perform_redirect)
# TODO How about **kwargs?
return response
@staticmethod
def parse_cred(cred):
if ":" not in cred:
return False
sep = cred.index(":")
new_cred = ["", ""]
new_cred[0] = cred[:sep].strip("'").strip('"')
if "@" not in new_cred[0]:
return False
new_cred[1] = cred[sep + 1:]
return new_cred
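# Two-step login: fetch the unified login entry page to capture the `next`
# redirect, POST the credentials as JSON to the member auth endpoint, then
# follow the redirect_uri from the returned JWT payload to seed the session.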
def do_login(self, email, password):
response = self.requests_provider(self.LOGIN_ENTRY_URL)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
next_parameter = None
try:
next_parameter = parse_qs(urlparse(response.request.url).query)["next"][0]
except (AttributeError, ValueError, IndexError):
self.display.exit("Login: unable to complete login on Safari Books Online. Try again...")
redirect_uri = API_ORIGIN_URL + quote_plus(next_parameter)
response = self.requests_provider(
self.LOGIN_URL,
is_post=True,
json={
"email": email,
"password": password,
"redirect_uri": redirect_uri
},
perform_redirect=False
)
if response == 0:
self.display.exit("Login: unable to perform auth to Safari Books Online.\n Try again...")
if response.status_code != 200: # TODO To be reviewed
try:
error_page = html.fromstring(response.text)
errors_message = error_page.xpath("//ul[@class='errorlist']//li/text()")
recaptcha = error_page.xpath("//div[@class='g-recaptcha']")
messages = ([" `%s`" % error for error in errors_message
if "password" in error or "email" in error] if len(errors_message) else []) + \
([" `ReCaptcha required (wait or do logout from the website).`"] if len(
recaptcha) else [])
self.display.exit(
"Login: unable to perform auth login to Safari Books Online.\n" + self.display.SH_YELLOW +
"[*]" + self.display.SH_DEFAULT + " Details:\n" + "%s" % "\n".join(
messages if len(messages) else [" Unexpected error!"])
)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Login: your login went wrong and it encountered in an error"
" trying to parse the login details of Safari Books Online. Try again..."
)
self.jwt = response.json() # TODO: save JWT Tokens and use the refresh_token to restore user session
response = self.requests_provider(self.jwt["redirect_uri"])
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
def check_login(self):
response = self.requests_provider(PROFILE_URL, perform_redirect=False)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
elif response.status_code != 200:
self.display.exit("Authentication issue: unable to access profile page.")
elif "user_type\":\"Expired" in response.text:
self.display.exit("Authentication issue: account subscription expired.")
self.display.info("Successfully authenticated.", state=True)
def get_book_info(self):
response = self.requests_provider(self.api_url)
if response == 0:
self.display.exit("API: unable to retrieve book info.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "last_chapter_read" in response:
del response["last_chapter_read"]
for key, value in response.items():
if value is None:
response[key] = 'n/a'
return response
def get_book_chapters(self, page=1):
response = self.requests_provider(urljoin(self.api_url, "chapter/?page=%s" % page))
if response == 0:
self.display.exit("API: unable to retrieve book chapters.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "results" not in response or not len(response["results"]):
self.display.exit("API: unable to retrieve book chapters.")
if response["count"] > sys.getrecursionlimit():
sys.setrecursionlimit(response["count"])
result = []
result.extend([c for c in response["results"] if "cover" in c["filename"] or "cover" in c["title"]])
for c in result:
del response["results"][response["results"].index(c)]
result += response["results"]
return result + (self.get_book_chapters(page + 1) if response["next"] else [])
def get_default_cover(self):
response = self.requests_provider(self.book_info["cover"], stream=True)
if response == 0:
self.display.error("Error trying to retrieve the cover: %s" % self.book_info["cover"])
return False
file_ext = response.headers["Content-Type"].split("/")[-1]
with open(os.path.join(self.images_path, "default_cover." + file_ext), 'wb') as i:
for chunk in response.iter_content(1024):
i.write(chunk)
return "default_cover." + file_ext
def get_html(self, url):
response = self.requests_provider(url)
if response == 0 or response.status_code != 200:
self.display.exit(
"Crawler: error trying to retrieve this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
root = None
try:
root = html.fromstring(response.text, base_url=SAFARI_BASE_URL)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Crawler: error trying to parse this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
return root
@staticmethod
def url_is_absolute(url):
return bool(urlparse(url).netloc)
@staticmethod
def is_image_link(url: str):
return pathlib.Path(url).suffix[1:].lower() in ["jpg", "jpeg", "png", "gif"]
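# Rewrites intra-book links for the EPUB layout: relative image links are
# redirected into Images/, .html page links become .xhtml, and absolute
# links pointing back into this book are resolved recursively.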
def link_replace(self, link):
if link and not link.startswith("mailto"):
if not self.url_is_absolute(link):
if any(x in link for x in ["cover", "images", "graphics"]) or \
self.is_image_link(link):
image = link.split("/")[-1]
return "Images/" + image
return link.replace(".html", ".xhtml")
else:
if self.book_id in link:
return self.link_replace(link.split(self.book_id)[-1])
return link
@staticmethod
def get_cover(html_root):
lowercase_ns = etree.FunctionNamespace(None)
lowercase_ns["lower-case"] = lambda _, n: n[0].lower() if n and len(n) else ""
images = html_root.xpath("//img[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover') or"
"contains(lower-case(@alt), 'cover')]")
if len(images):
return images[0]
divs = html_root.xpath("//div[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(divs):
return divs[0]
a = html_root.xpath("//a[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(a):
return a[0]
return None
def parse_html(self, root, first_page=False):
if random() > 0.8:
if len(root.xpath("//div[@class='controls']/a/text()")):
self.display.exit(self.display.api_error(" "))
book_content = root.xpath("//div[@id='sbo-rt-content']")
if not len(book_content):
self.display.exit(
"Parser: book content's corrupted or not present: %s (%s)" %
(self.filename, self.chapter_title)
)
page_css = ""
if len(self.chapter_stylesheets):
for chapter_css_url in self.chapter_stylesheets:
if chapter_css_url not in self.css:
self.css.append(chapter_css_url)
self.display.log("Crawler: found a new CSS at %s" % chapter_css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(chapter_css_url))
stylesheet_links = root.xpath("//link[@rel='stylesheet']")
if len(stylesheet_links):
for s in stylesheet_links:
css_url = urljoin("https:", s.attrib["href"]) if s.attrib["href"][:2] == "//" \
else urljoin(self.base_url, s.attrib["href"])
if css_url not in self.css:
self.css.append(css_url)
self.display.log("Crawler: found a new CSS at %s" % css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(css_url))
stylesheets = root.xpath("//style")
if len(stylesheets):
for css in stylesheets:
if "data-template" in css.attrib and len(css.attrib["data-template"]):
css.text = css.attrib["data-template"]
del css.attrib["data-template"]
try:
page_css += html.tostring(css, method="xml", encoding='unicode') + "\n"
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse one CSS found in this page: %s (%s)" %
(self.filename, self.chapter_title)
)
# TODO: add all not covered tag for `link_replace` function
svg_image_tags = root.xpath("//image")
if len(svg_image_tags):
for img in svg_image_tags:
image_attr_href = [x for x in img.attrib.keys() if "href" in x]
if len(image_attr_href):
svg_url = img.attrib.get(image_attr_href[0])
svg_root = img.getparent().getparent()
new_img = svg_root.makeelement("img")
new_img.attrib.update({"src": svg_url})
svg_root.remove(img.getparent())
svg_root.append(new_img)
book_content = book_content[0]
book_content.rewrite_links(self.link_replace)
xhtml = None
try:
if first_page:
is_cover = self.get_cover(book_content)
if is_cover is not None:
page_css = "<style>" \
"body{display:table;position:absolute;margin:0!important;height:100%;width:100%;}" \
"#Cover{display:table-cell;vertical-align:middle;text-align:center;}" \
"img{height:90vh;margin-left:auto;margin-right:auto;}" \
"</style>"
cover_html = html.fromstring("<div id=\"Cover\"></div>")
cover_div = cover_html.xpath("//div")[0]
cover_img = cover_div.makeelement("img")
cover_img.attrib.update({"src": is_cover.attrib["src"]})
cover_div.append(cover_img)
book_content = cover_html
self.cover = is_cover.attrib["src"]
xhtml = html.tostring(book_content, method="xml", encoding='unicode')
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse HTML of this page: %s (%s)" %
(self.filename, self.chapter_title)
)
return page_css, xhtml
@staticmethod
def escape_dirname(dirname, clean_space=False):
if ":" in dirname:
if dirname.index(":") > 15:
dirname = dirname.split(":")[0]
elif "win" in sys.platform:
dirname = dirname.replace(":", ",")
for ch in ['~', '#', '%', '&', '*', '{', '}', '\\', '<', '>', '?', '/', '`', '\'', '"', '|', '+', ':']:
if ch in dirname:
dirname = dirname.replace(ch, "_")
return dirname if not clean_space else dirname.replace(" ", "")
def create_dirs(self):
if os.path.isdir(self.BOOK_PATH):
self.display.log("Book directory already exists: %s" % self.BOOK_PATH)
else:
os.makedirs(self.BOOK_PATH)
oebps = os.path.join(self.BOOK_PATH, "OEBPS")
if not os.path.isdir(oebps):
self.display.book_ad_info = True
os.makedirs(oebps)
self.css_path = os.path.join(oebps, "Styles")
if os.path.isdir(self.css_path):
self.display.log("CSSs directory already exists: %s" % self.css_path)
else:
os.makedirs(self.css_path)
self.display.css_ad_info.value = 1
self.images_path = os.path.join(oebps, "Images")
if os.path.isdir(self.images_path):
self.display.log("Images directory already exists: %s" % self.images_path)
else:
os.makedirs(self.images_path)
self.display.images_ad_info.value = 1
def save_page_html(self, contents):
self.filename = self.filename.replace(".html", ".xhtml")
open(os.path.join(self.BOOK_PATH, "OEBPS", self.filename), "wb") \
.write(self.BASE_HTML.format(contents[0], contents[1]).encode("utf-8", 'xmlcharrefreplace'))
self.display.log("Created: %s" % self.filename)
def get(self):
len_books = len(self.book_chapters)
self.display.my_bar = st.progress(0)
for _ in range(len_books):
if not len(self.chapters_queue):
return
first_page = len_books == len(self.chapters_queue)
next_chapter = self.chapters_queue.pop(0)
self.chapter_title = next_chapter["title"]
self.filename = next_chapter["filename"]
# Images
if "images" in next_chapter and len(next_chapter["images"]):
self.images.extend(urljoin(next_chapter['asset_base_url'], img_url)
for img_url in next_chapter['images'])
# Stylesheets
self.chapter_stylesheets = []
if "stylesheets" in next_chapter and len(next_chapter["stylesheets"]):
self.chapter_stylesheets.extend(x["url"] for x in next_chapter["stylesheets"])
if "site_styles" in next_chapter and len(next_chapter["site_styles"]):
self.chapter_stylesheets.extend(next_chapter["site_styles"])
if os.path.isfile(os.path.join(self.BOOK_PATH, "OEBPS", self.filename.replace(".html", ".xhtml"))):
if not self.display.book_ad_info and \
next_chapter not in self.book_chapters[:self.book_chapters.index(next_chapter)]:
self.display.info(
("File `%s` already exists.\n"
" If you want to download again all the book%s,\n"
" please delete the output directory '" + self.BOOK_PATH + "' and restart the program.")
%
(
self.filename.replace(".html", ".xhtml"),
" (especially because you selected the `--no-kindle` option)"
if self.args.no_kindle else ""
)
)
self.display.book_ad_info = 2
else:
self.save_page_html(self.parse_html(self.get_html(next_chapter["content"]), first_page))
self.display.state(len_books, len_books - len(self.chapters_queue))
def _thread_download_css(self, url):
css_file = os.path.join(self.css_path, "Style{0:0>2}.css".format(self.css.index(url)))
if os.path.isfile(css_file):
if not self.display.css_ad_info.value and url not in self.css[:self.css.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the CSSs,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
css_file)
self.display.css_ad_info.value = 1
else:
response = self.requests_provider(url)
if response == 0:
self.display.error("Error trying to retrieve this CSS: %s\n From: %s" % (css_file, url))
with open(css_file, 'wb') as s:
s.write(response.content)
self.css_done_queue.put(1)
self.display.state(len(self.css), self.css_done_queue.qsize())
def _thread_download_images(self, url):
image_name = url.split("/")[-1]
image_path = os.path.join(self.images_path, image_name)
if os.path.isfile(image_path):
if not self.display.images_ad_info.value and url not in self.images[:self.images.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the images,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
image_name)
self.display.images_ad_info.value = 1
else:
response = self.requests_provider(urljoin(SAFARI_BASE_URL, url), stream=True)
if response == 0:
self.display.error("Error trying to retrieve this image: %s\n From: %s" % (image_name, url))
return
with open(image_path, 'wb') as img:
for chunk in response.iter_content(1024):
img.write(chunk)
self.images_done_queue.put(1)
self.display.state(len(self.images), self.images_done_queue.qsize())
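# Splits the work into batches of at most five and runs one Process per
# item, joining each batch before the next; the recursion does the batching.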
def _start_multiprocessing(self, operation, full_queue):
if len(full_queue) > 5:
for i in range(0, len(full_queue), 5):
self._start_multiprocessing(operation, full_queue[i:i + 5])
else:
process_queue = [Process(target=operation, args=(arg,)) for arg in full_queue]
for proc in process_queue:
proc.start()
for proc in process_queue:
proc.join()
def collect_css(self):
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
self.display.my_bar = st.progress(0)
for css_url in self.css:
self._thread_download_css(css_url)
def collect_images(self):
if self.display.book_ad_info == 2:
self.display.info("Some of the book contents were already downloaded.\n"
" If you want to be sure that all the images will be downloaded,\n"
" please delete the output direcotry '" + self.BOOK_PATH +
"' and restart the program.")
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
self.display.my_bar = st.progress(0)
for image_url in self.images:
self._thread_download_images(image_url)
def create_content_opf(self):
self.css = next(os.walk(self.css_path))[2]
self.images = next(os.walk(self.images_path))[2]
manifest = []
spine = []
for c in self.book_chapters:
c["filename"] = c["filename"].replace(".html", ".xhtml")
item_id = escape("".join(c["filename"].split(".")[:-1]))
manifest.append("<item id=\"{0}\" href=\"{1}\" media-type=\"application/xhtml+xml\" />".format(
item_id, c["filename"]
))
spine.append("<itemref idref=\"{0}\"/>".format(item_id))
for i in set(self.images):
dot_split = i.split(".")
head = "img_" + escape("".join(dot_split[:-1]))
extension = dot_split[-1]
manifest.append("<item id=\"{0}\" href=\"Images/{1}\" media-type=\"image/{2}\" />".format(
head, i, "jpeg" if "jp" in extension else extension
))
for i in range(len(self.css)):
manifest.append("<item id=\"style_{0:0>2}\" href=\"Styles/Style{0:0>2}.css\" "
"media-type=\"text/css\" />".format(i))
authors = "\n".join("<dc:creator opf:file-as=\"{0}\" opf:role=\"aut\">{0}</dc:creator>".format(
escape(aut["name"])
) for aut in self.book_info["authors"])
subjects = "\n".join("<dc:subject>{0}</dc:subject>".format(escape(sub["name"]))
for sub in self.book_info["subjects"])
return self.CONTENT_OPF.format(
(self.book_info["isbn"] if self.book_info["isbn"] else self.book_id),
escape(self.book_title),
authors,
escape(self.book_info["description"]),
subjects,
", ".join(escape(pub["name"]) for pub in self.book_info["publishers"]),
escape(self.book_info["rights"]) if self.book_info["rights"] else "",
self.book_info["issued"],
self.cover,
"\n".join(manifest),
"\n".join(spine),
self.book_chapters[0]["filename"].replace(".html", ".xhtml")
)
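# Recursively flattens the TOC tree returned by the API into NCX navPoint
# markup, threading the running play order `c` and the maximum depth `mx`
# through the recursion.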
@staticmethod
def parse_toc(l, c=0, mx=0):
r = ""
for cc in l:
c += 1
if int(cc["depth"]) > mx:
mx = int(cc["depth"])
r += "<navPoint id=\"{0}\" playOrder=\"{1}\">" \
"<navLabel><text>{2}</text></navLabel>" \
"<content src=\"{3}\"/>".format(
cc["fragment"] if len(cc["fragment"]) else cc["id"], c,
escape(cc["label"]), cc["href"].replace(".html", ".xhtml").split("/")[-1]
)
if cc["children"]:
sr, c, mx = SafariBooks.parse_toc(cc["children"], c, mx)
r += sr
r += "</navPoint>\n"
return r, c, mx
def create_toc(self):
response = self.requests_provider(urljoin(self.api_url, "toc/"))
if response == 0:
self.display.exit("API: unable to retrieve book chapters. "
"Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!")
response = response.json()
if not isinstance(response, list) and len(response.keys()) == 1:
self.display.exit(
self.display.api_error(response) +
" Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!"
)
navmap, _, max_depth = self.parse_toc(response)
return self.TOC_NCX.format(
(self.book_info["isbn"] if self.book_info["isbn"] else self.book_id),
max_depth,
self.book_title,
", ".join(aut["name"] for aut in self.book_info["authors"]),
navmap
)
def create_epub(self):
open(os.path.join(self.BOOK_PATH, "mimetype"), "w").write("application/epub+zip")
meta_info = os.path.join(self.BOOK_PATH, "META-INF")
if os.path.isdir(meta_info):
self.display.log("META-INF directory already exists: %s" % meta_info)
else:
os.makedirs(meta_info)
open(os.path.join(meta_info, "container.xml"), "wb").write(
self.CONTAINER_XML.encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "content.opf"), "wb").write(
self.create_content_opf().encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "toc.ncx"), "wb").write(
self.create_toc().encode("utf-8", "xmlcharrefreplace")
)
zip_file = os.path.join(PATH, "Books", self.book_id)
if os.path.isfile(zip_file + ".zip"):
os.remove(zip_file + ".zip")
shutil.make_archive(zip_file, 'zip', self.BOOK_PATH)
os.rename(zip_file + ".zip", os.path.join(self.BOOK_PATH, self.book_id) + ".epub")
        epub_path = os.path.join(self.BOOK_PATH, self.book_id) + ".epub"
        with open(epub_path, 'rb') as f:
            book = f.read()
        b64 = base64.b64encode(book).decode()
        href = f'<a href="data:file/epub;base64,{b64}">Download the book</a>'
        st.markdown(href, unsafe_allow_html=True)
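# Note: the EPUB spec requires "mimetype" to be the first entry in the zip
# and stored uncompressed; shutil.make_archive above guarantees neither, so
# strict readers may reject the file. A hedged packaging sketch (the function
# name and paths are illustrative, not part of this class):
def _pack_epub_strictly(book_dir, epub_path):
    import os
    import zipfile
    with zipfile.ZipFile(epub_path, "w") as z:
        # "mimetype" must come first and must not be compressed.
        z.write(os.path.join(book_dir, "mimetype"), "mimetype",
                compress_type=zipfile.ZIP_STORED)
        for root, _, files in os.walk(book_dir):
            for name in files:
                full = os.path.join(root, name)
                rel = os.path.relpath(full, book_dir)
                if rel != "mimetype":
                    z.write(full, rel, compress_type=zipfile.ZIP_DEFLATED)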
# MAIN
"""
if __name__ == "__main__":
arguments = argparse.ArgumentParser(prog="safaribooks.py",
description="Download and generate an EPUB of your favorite books"
" from Safari Books Online.",
add_help=False,
allow_abbrev=False)
login_arg_group = arguments.add_mutually_exclusive_group()
login_arg_group.add_argument(
"--cred", metavar="<EMAIL:PASS>", default=False,
help="Credentials used to perform the auth login on Safari Books Online."
" Es. ` --cred \"account_mail@mail.com:password01\" `."
)
login_arg_group.add_argument(
"--login", action='store_true',
help="Prompt for credentials used to perform the auth login on Safari Books Online."
)
arguments.add_argument(
"--no-cookies", dest="no_cookies", action='store_true',
help="Prevent your session data to be saved into `cookies.json` file."
)
arguments.add_argument(
"--kindle", dest="kindle", action='store_true',
help="Add some CSS rules that block overflow on `table` and `pre` elements."
" Use this option if you're going to export the EPUB to E-Readers like Amazon Kindle."
)
arguments.add_argument(
"--preserve-log", dest="log", action='store_true', help="Leave the `info_XXXXXXXXXXXXX.log`"
" file even if there isn't any error."
)
arguments.add_argument("--help", action="help", default=argparse.SUPPRESS, help='Show this help message.')
arguments.add_argument(
"bookid", metavar='<BOOK ID>',
help="Book digits ID that you want to download. You can find it in the URL (X-es):"
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
)
args_parsed = arguments.parse_args()
if args_parsed.cred or args_parsed.login:
user_email = ""
pre_cred = ""
if args_parsed.cred:
pre_cred = args_parsed.cred
else:
user_email = input("Email: ")
passwd = getpass.getpass("Password: ")
pre_cred = user_email + ":" + passwd
        parsed_cred = SafariBooks.parse_cred(pre_cred)
        if not parsed_cred:
            arguments.error("invalid credential: %s" % (
                args_parsed.cred if args_parsed.cred else (user_email + ":*******")
            ))
        args_parsed.cred = parsed_cred
    else:
        if args_parsed.no_cookies:
            arguments.error("invalid option: `--no-cookies` is valid only if you use the `--cred` option")
SafariBooks(args_parsed)
    # Hint: to download more than one book at once, initialize more than one instance of `SafariBooks`...
sys.exit(0)
"""
|
igv.py
|
### This script was written by Brent Pedersen
### https://github.com/brentp/bio-playground/blob/master/igv/igv.py
import socket
import os.path as op
import os
import sys
class IGV(object):
r"""
Simple wrapper to the IGV (http://www.broadinstitute.org/software/igv/home)
socket interface (http://www.broadinstitute.org/software/igv/PortCommands)
requires:
1) you have IGV running on your machine (launch with webstart here:
http://www.broadinstitute.org/software/igv/download)
2) you have enabled port communication in
View -> Preferences... -> Advanced
Successful commands return 'OK'
example usage:
>>> igv = IGV()
>>> igv.genome('hg19')
'OK'
    #>>> igv.load('http://www.broadinstitute.org/igvdata/1KG/pilot2Bams/NA12878.SLX.bam')
    #'OK'
>>> igv.go('chr1:45,600-45,800')
'OK'
#save as svg, png, or jpg
>>> igv.save('/tmp/r/region.svg')
'OK'
>>> igv.save('/tmp/r/region.png')
'OK'
# go to a gene name.
>>> igv.go('muc5b')
'OK'
>>> igv.sort()
'OK'
>>> igv.save('muc5b.png')
'OK'
# get a list of commands that will work as an IGV batch script.
>>> print "\n".join(igv.commands)
snapshotDirectory /tmp/igv
genome hg19
goto chr1:45,600-45,800
snapshotDirectory /tmp/r
snapshot region.svg
snapshot region.png
goto muc5b
sort base
snapshot muc5b.png
Note, there will be some delay as the browser has to load the annotations
at each step.
"""
_socket = None
_path = None
def __init__(self, host='127.0.0.1', port=60151, snapshot_dir='/tmp/igv'):
self.host = host
self.port = port
self.commands = []
self.connect()
self.set_path(snapshot_dir)
@classmethod
def start(cls, jnlp="igv.jnlp", url="http://www.broadinstitute.org/igv/projects/current/"):
import subprocess
from threading import Thread
import time
def readit(ffrom, fto, wait):
for line in iter(ffrom.readline, b''):
line = line.decode()
if "Listening on port" in line:
wait[0] = False
fto.write(line + '\n')
ffrom.close()
p = subprocess.Popen("/usr/bin/javaws -Xnosplash %s%s" % (url, jnlp),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
wait = [True]
_tout = Thread(target=readit, args=(p.stdout, sys.stdout, wait))
_terr = Thread(target=readit, args=(p.stderr, sys.stderr, wait))
        _tout.daemon = _terr.daemon = True
_tout.start()
_terr.start()
while p.poll() is None and wait[0]:
time.sleep(10)
print ("waiting", wait)
def connect(self):
if self._socket: self._socket.close()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((self.host, self.port))
def go(self, position):
return self.send('goto ' + position)
goto = go
def genome(self, name):
return self.send('genome ' + name)
def load(self, url):
return self.send('load ' + url)
def sort(self, option='base'):
"""
        option is one of: base, position, strand, quality, sample, and
readGroup.
"""
assert option in ("base", "position", "strand", "quality", "sample",
"readGroup")
return self.send('sort ' + option)
def set_path(self, snapshot_dir):
if snapshot_dir == self._path: return
if not op.exists(snapshot_dir):
os.makedirs(snapshot_dir)
self.send('snapshotDirectory %s' % snapshot_dir)
self._path = snapshot_dir
def expand(self, track):
self.send('expand %s' % track)
def collapse(self, track):
self.send('collapse %s' % track)
def clear(self):
self.send('clear')
def send(self, cmd):
self.commands.append(cmd)
cmd = cmd + '\n'
cmd = cmd.encode()
self._socket.send(cmd)
print(cmd)
ret = self._socket.recv(4096).decode()
ret = ret.rstrip('\n')
print("ret:", ret)
return ret
def save(self, path=None):
if path is not None:
# igv assumes the path is just a single filename, but
# we can set the snapshot dir. then just use the filename.
dirname = op.dirname(path)
if dirname:
self.set_path(dirname)
return self.send('snapshot ' + op.basename(path))
else:
return self.send('snapshot')
snapshot = save
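    # A minimal sketch of replaying a session: every successful call is
    # recorded in self.commands, so it can be dumped as an IGV batch script
    # (the output path is illustrative):
    #
    #   igv = IGV()
    #   igv.genome('hg19')
    #   igv.save('/tmp/r/region.png')
    #   with open('/tmp/igv.batch', 'w') as fh:
    #       fh.write('\n'.join(igv.commands) + '\n')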
if __name__ == "__main__":
import doctest
doctest.testmod()
|
coach.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import copy
from configparser import ConfigParser, Error
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import atexit
import time
import sys
import json
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters, \
RunType, DistributedCoachSynchronizationType
from rl_coach.core_types import TotalStepsCounter, RunPhase, PlayingStepsType, TrainingSteps, EnvironmentEpisodes, \
EnvironmentSteps, StepMethod, Transition
from multiprocessing import Process
from multiprocessing.managers import BaseManager
import subprocess
from glob import glob
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, \
get_base_dir, set_gpu
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.memories.backend.memory_impl import construct_memory_params
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.s3_data_store import S3DataStoreParameters
from rl_coach.data_stores.nfs_data_store import NFSDataStoreParameters
from rl_coach.data_stores.redis_data_store import RedisDataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store, construct_data_store_params
from rl_coach.training_worker import training_worker
from rl_coach.rollout_worker import rollout_worker
from rl_coach.schedules import *
from rl_coach.exploration_policies.e_greedy import *
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def _get_cuda_available_devices():
import ctypes
try:
devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
return [] if devices[0] == '' else [int(i) for i in devices]
except KeyError:
pass
try:
cuda_lib = ctypes.CDLL('libcuda.so')
except OSError:
return []
CUDA_SUCCESS = 0
num_gpus = ctypes.c_int()
result = cuda_lib.cuInit(0)
if result != CUDA_SUCCESS:
return []
result = cuda_lib.cuDeviceGetCount(ctypes.byref(num_gpus))
if result != CUDA_SUCCESS:
return []
return list(range(num_gpus.value))
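# For example (assuming a two-GPU machine): with CUDA_VISIBLE_DEVICES="0"
# this returns [0]; with the variable unset it asks libcuda directly and
# returns [0, 1]; without libcuda installed it returns [].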
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
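# e.g. add_items_to_dict({'a': 1}, {'a': 0, 'b': 2}) == {'a': 1, 'b': 2}
# (values from target_dict win on key collisions).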
def open_dashboard(experiment_path):
"""
    Open an X11-based dashboard in a new process (non-blocking).
"""
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="bash")
subprocess.Popen(cmd, shell=True, executable="bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
"""
Runs the graph_manager using the configured task_parameters.
This stand-alone method is a convenience for multiprocessing.
"""
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only is not None:
steps_to_evaluate = task_parameters.evaluate_only if task_parameters.evaluate_only > 0 \
else sys.maxsize
graph_manager.evaluate(EnvironmentSteps(steps_to_evaluate))
else:
graph_manager.improve()
graph_manager.close()
def handle_distributed_coach_tasks(graph_manager, args, task_parameters):
ckpt_inside_container = "/checkpoint"
memory_backend_params = None
if args.memory_backend_params:
memory_backend_params = json.loads(args.memory_backend_params)
memory_backend_params['run_type'] = str(args.distributed_coach_run_type)
graph_manager.agent_params.memory.register_var('memory_backend_params', construct_memory_params(memory_backend_params))
data_store = None
data_store_params = None
if args.data_store_params:
data_store_params = construct_data_store_params(json.loads(args.data_store_params))
data_store_params.expt_dir = args.experiment_path
data_store_params.checkpoint_dir = ckpt_inside_container
graph_manager.data_store_params = data_store_params
data_store = get_data_store(data_store_params)
if args.distributed_coach_run_type == RunType.TRAINER:
task_parameters.checkpoint_save_dir = ckpt_inside_container
training_worker(
graph_manager=graph_manager,
data_store=data_store,
task_parameters=task_parameters,
is_multi_node_test=args.is_multi_node_test
)
if args.distributed_coach_run_type == RunType.ROLLOUT_WORKER:
rollout_worker(
graph_manager=graph_manager,
data_store=data_store,
num_workers=args.num_workers,
task_parameters=task_parameters
)
def handle_distributed_coach_orchestrator(args):
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes, \
RunTypeParameters
ckpt_inside_container = "/checkpoint"
arg_list = sys.argv[1:]
try:
i = arg_list.index('--distributed_coach_run_type')
arg_list.pop(i)
arg_list.pop(i)
except ValueError:
pass
trainer_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.TRAINER)] + arg_list
rollout_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.ROLLOUT_WORKER)] + arg_list
if '--experiment_name' not in rollout_command:
rollout_command = rollout_command + ['--experiment_name', args.experiment_name]
if '--experiment_name' not in trainer_command:
trainer_command = trainer_command + ['--experiment_name', args.experiment_name]
memory_backend_params = None
if args.memory_backend == "redispubsub":
memory_backend_params = RedisPubSubMemoryBackendParameters()
ds_params_instance = None
if args.data_store == "s3":
ds_params = DataStoreParameters("s3", "", "")
ds_params_instance = S3DataStoreParameters(ds_params=ds_params, end_point=args.s3_end_point, bucket_name=args.s3_bucket_name,
creds_file=args.s3_creds_file, checkpoint_dir=ckpt_inside_container, expt_dir=args.experiment_path)
elif args.data_store == "nfs":
ds_params = DataStoreParameters("nfs", "kubernetes", "")
ds_params_instance = NFSDataStoreParameters(ds_params)
elif args.data_store == "redis":
ds_params = DataStoreParameters("redis", "kubernetes", "")
ds_params_instance = RedisDataStoreParameters(ds_params)
else:
raise ValueError("data_store {} found. Expected 's3' or 'nfs'".format(args.data_store))
worker_run_type_params = RunTypeParameters(args.image, rollout_command, run_type=str(RunType.ROLLOUT_WORKER), num_replicas=args.num_workers)
trainer_run_type_params = RunTypeParameters(args.image, trainer_command, run_type=str(RunType.TRAINER))
orchestration_params = KubernetesParameters([worker_run_type_params, trainer_run_type_params],
kubeconfig='~/.kube/config',
memory_backend_parameters=memory_backend_params,
data_store_params=ds_params_instance)
orchestrator = Kubernetes(orchestration_params)
if not orchestrator.setup(args.checkpoint_restore_dir):
screen.print("Could not setup.")
return 1
if orchestrator.deploy_trainer():
screen.print("Successfully deployed trainer.")
else:
screen.print("Could not deploy trainer.")
return 1
if orchestrator.deploy_worker():
screen.print("Successfully deployed rollout worker(s).")
else:
screen.print("Could not deploy rollout worker(s).")
return 1
if args.dump_worker_logs:
screen.log_title("Dumping rollout worker logs in: {}".format(args.experiment_path))
orchestrator.worker_logs(path=args.experiment_path)
exit_code = 1
try:
exit_code = orchestrator.trainer_logs()
except KeyboardInterrupt:
pass
orchestrator.undeploy()
return exit_code
class CoachLauncher(object):
"""
This class is responsible for gathering all user-specified configuration options, parsing them,
instantiating a GraphManager and then starting that GraphManager with either improve() or evaluate().
This class is also responsible for launching multiple processes.
It is structured so that it can be sub-classed to provide alternate mechanisms to configure and launch
Coach jobs.
The key entry-point for this class is the .launch() method which is expected to be called from __main__
and handle absolutely everything for a job.
"""
gpus = _get_cuda_available_devices()
def launch(self):
"""
Main entry point for the class, and the standard way to run coach from the command line.
Parses command-line arguments through argparse, instantiates a GraphManager and then runs it.
"""
parser = self.get_argument_parser()
args = self.get_config_args(parser)
graph_manager = self.get_graph_manager_from_args(args)
self.run_graph_manager(graph_manager, args)
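    # A minimal usage sketch, mirroring main() below:
    #
    #   launcher = CoachLauncher()
    #   launcher.launch()   # parses sys.argv, builds a GraphManager, runs it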
def get_graph_manager_from_args(self, args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user.
:param args: the arguments given by the user
:return: the graph manager, not bound to task_parameters yet.
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
from rl_coach.agents.human_agent import HumanAgentParameters
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
# Set framework
# Note: Some graph managers (e.g. HAC preset) create multiple agents and the attribute is called agents_params
if hasattr(graph_manager, 'agent_params'):
for network_parameters in graph_manager.agent_params.network_wrappers.values():
network_parameters.framework = args.framework
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None and graph_manager.env_params is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
graph_manager.visualization_parameters.print_networks_summary = args.print_networks_summary
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
# load custom parameters into run_dict
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
def display_all_presets_and_exit(self):
# list available presets
screen.log_title("Available Presets:")
for preset in sorted(list_all_presets()):
print(preset)
sys.exit(0)
def expand_preset(self, preset):
"""
Replace a short preset name with the full python path, and verify that it can be imported.
"""
if preset.lower() in [p.lower() for p in list_all_presets()]:
preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', preset))
else:
preset = "{}".format(preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(preset.split(":")) == 1:
preset += ":graph_manager"
# verify that the preset exists
preset_path = preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(preset))
return preset
def get_config_args(self, parser: argparse.ArgumentParser, arguments=None) -> argparse.Namespace:
"""
Returns a Namespace object with all the user-specified configuration options needed to launch.
        This implementation uses argparse to take arguments from the CLI, but this can be overridden by
another method that gets its configuration from elsewhere. An equivalent method however must
return an identically structured Namespace object, which conforms to the structure defined by
get_argument_parser.
        This method parses the arguments that the user entered, does some basic validation, and
        expands user-specified short-form values to be more explicit.
:param parser: a parser object which implicitly defines the format of the Namespace that
is expected to be returned.
:param arguments: command line arguments
:return: the parsed arguments as a Namespace
"""
if arguments is None:
args = parser.parse_args()
else:
args = parser.parse_args(arguments)
if args.nocolor:
screen.set_use_colors(False)
# if no arg is given
if (len(sys.argv) == 1 and arguments is None) or (arguments is not None and len(arguments) <= 2):
parser.print_help()
sys.exit(1)
# list available presets
if args.list:
self.display_all_presets_and_exit()
# Read args from config file for distributed Coach.
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
coach_config = ConfigParser({
'image': '',
'memory_backend': 'redispubsub',
'data_store': 's3',
's3_end_point': 's3.amazonaws.com',
's3_bucket_name': '',
's3_creds_file': ''
})
try:
coach_config.read(args.distributed_coach_config_path)
args.image = coach_config.get('coach', 'image')
args.memory_backend = coach_config.get('coach', 'memory_backend')
args.data_store = coach_config.get('coach', 'data_store')
if args.data_store == 's3':
args.s3_end_point = coach_config.get('coach', 's3_end_point')
args.s3_bucket_name = coach_config.get('coach', 's3_bucket_name')
args.s3_creds_file = coach_config.get('coach', 's3_creds_file')
except Error as e:
screen.error("Error when reading distributed Coach config file: {}".format(e))
if args.image == '':
screen.error("Image cannot be empty.")
data_store_choices = ['s3', 'nfs', 'redis']
if args.data_store not in data_store_choices:
screen.warning("{} data store is unsupported.".format(args.data_store))
screen.error("Supported data stores are {}.".format(data_store_choices))
memory_backend_choices = ['redispubsub']
if args.memory_backend not in memory_backend_choices:
screen.warning("{} memory backend is not supported.".format(args.memory_backend))
screen.error("Supported memory backends are {}.".format(memory_backend_choices))
if args.data_store == 's3':
if args.s3_bucket_name == '':
screen.error("S3 bucket name cannot be empty.")
if args.s3_creds_file == '':
args.s3_creds_file = None
if args.play and args.distributed_coach:
screen.error("Playing is not supported in distributed Coach.")
# replace a short preset name with the full path
if args.preset is not None:
args.preset = self.expand_preset(args.preset)
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
# If distributed trainer, the checkpoint dir is not yet available so skipping the check in that case.
if not (args.distributed_coach and args.distributed_coach_run_type in [RunType.TRAINER, RunType.ROLLOUT_WORKER]):
screen.error("The requested checkpoint folder to load from does not exist.")
# validate the checkpoints args
if args.checkpoint_restore_file is not None and not glob(args.checkpoint_restore_file + '*'):
screen.error("The requested checkpoint file to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play and not args.environment_type:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name, args.experiment_path)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.checkpoint_save_dir = os.path.join(args.experiment_path, 'checkpoint') if args.checkpoint_save_secs is not None else None
if args.export_onnx_graph and not args.checkpoint_save_secs:
screen.warning("Exporting ONNX graphs requires setting the --checkpoint_save_secs flag. "
"The --export_onnx_graph will have no effect.")
if args.use_cpu or not CoachLauncher.gpus:
CoachLauncher.gpus = [None]
return args
def get_argument_parser(self) -> argparse.ArgumentParser:
"""
        This returns an ArgumentParser object which defines the set of options that users are expected to supply in order
        to launch a Coach job.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default=None,
type=str)
parser.add_argument('-ep', '--experiment_path',
help="(string) Path to experiments folder.",
default=None,
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow, mxnet",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(flag) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(int) Run evaluation only, for at least the given number of steps (note that complete "
"episodes are evaluated). This is a convenient way to disable training in order "
"to evaluate an existing checkpoint. If value is 0, or no value is provided, "
"evaluation will run for an infinite number of steps.",
nargs='?',
const=0,
type=int)
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('--nocolor',
help="(flag) Turn off color-codes in screen logging. Ascii text only",
action='store_true')
parser.add_argument('-s', '--checkpoint_save_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-crf', '--checkpoint_restore_file',
help='(string) Path to a checkpoint file to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset.",
default=None,
type=str)
        parser.add_argument('-lvl', '--level',
                            help="(string) Choose the level that will be played in the environment that was selected. "
                                 "This value will override the level parameter in the environment class.",
                            default=None,
                            type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization_parameters.render=False; heatup_steps=EnvironmentSteps(1000);"
"improve_steps=TrainingSteps(100000); optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_networks_summary',
help="(flag) Print network summary to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
parser.add_argument('-onnx', '--export_onnx_graph',
help="(flag) Export the ONNX graph to the experiment directory. "
"This will have effect only if the --checkpoint_save_secs flag is used in order to store "
"checkpoints, since the weights checkpoint are needed for the ONNX graph. "
"Keep in mind that this can cause major overhead on the experiment. "
"Exporting ONNX graphs requires manually installing the tf2onnx package "
"(https://github.com/onnx/tensorflow-onnx).",
action='store_true')
parser.add_argument('-dc', '--distributed_coach',
help="(flag) Use distributed Coach.",
action='store_true')
parser.add_argument('-dcp', '--distributed_coach_config_path',
help="(string) Path to config file when using distributed rollout workers."
"Only distributed Coach parameters should be provided through this config file."
"Rest of the parameters are provided using Coach command line options."
"Used only with --distributed_coach flag."
"Ignored if --distributed_coach flag is not used.",
type=str)
parser.add_argument('--memory_backend_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--data_store_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--distributed_coach_run_type',
help=argparse.SUPPRESS,
type=RunType,
default=RunType.ORCHESTRATOR,
choices=list(RunType))
parser.add_argument('-asc', '--apply_stop_condition',
help="(flag) If set, this will apply a stop condition on the run, defined by reaching a"
"target success rate as set by the environment or a custom success rate as defined "
"in the preset. ",
action='store_true')
parser.add_argument('--dump_worker_logs',
help="(flag) Only used in distributed coach. If set, the worker logs are saved in the experiment dir",
action='store_true')
parser.add_argument('--is_multi_node_test',
help=argparse.SUPPRESS,
action='store_true')
return parser
def run_graph_manager(self, graph_manager: 'GraphManager', args: argparse.Namespace):
task_parameters = self.create_task_parameters(graph_manager, args)
if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
handle_distributed_coach_tasks(graph_manager, args, task_parameters)
return
# Single-threaded runs
if args.num_workers == 1:
self.start_single_process(task_parameters, graph_manager, args)
else:
self.start_multi_process(graph_manager, args)
@staticmethod
def create_task_parameters(graph_manager: 'GraphManager', args: argparse.Namespace):
if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
screen.error(
"{} algorithm is not supported using distributed Coach.".format(graph_manager.agent_params.algorithm))
if args.distributed_coach and args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
screen.warning(
"The --checkpoint_save_secs or -s argument will be ignored as SYNC distributed coach sync type is used. Checkpoint will be saved every training iteration.")
if args.distributed_coach and not args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
screen.error(
"Distributed coach with ASYNC distributed coach sync type requires --checkpoint_save_secs or -s.")
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary and not args.distributed_coach:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
if args.checkpoint_restore_dir is not None and args.checkpoint_restore_file is not None:
raise ValueError("Only one of the checkpoint_restore_dir and checkpoint_restore_file arguments can be used"
" simulatenously.")
checkpoint_restore_path = args.checkpoint_restore_dir if args.checkpoint_restore_dir \
else args.checkpoint_restore_file
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
exit(handle_distributed_coach_orchestrator(args))
task_parameters = TaskParameters(
framework_type=args.framework,
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu,
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_path=checkpoint_restore_path,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
return task_parameters
@staticmethod
def start_single_process(task_parameters, graph_manager: 'GraphManager', args: argparse.Namespace):
# Start the training or evaluation
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
@staticmethod
def start_multi_process(graph_manager: 'GraphManager', args: argparse.Namespace):
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
ps_hosts = "localhost:{}".format(get_open_port())
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
# Shared memory
class CommManager(BaseManager):
pass
CommManager.register('SharedMemoryScratchPad', SharedMemoryScratchPad, exposed=['add', 'get', 'internal_call'])
comm_manager = CommManager()
comm_manager.start()
shared_memory_scratchpad = comm_manager.SharedMemoryScratchPad()
if args.checkpoint_restore_file:
raise ValueError("Multi-Process runs only support restoring checkpoints from a directory, "
"and not from a file. ")
def start_distributed_task(job_type, task_index, evaluation_worker=False,
shared_memory_scratchpad=shared_memory_scratchpad,
gpu_id=None):
task_parameters = DistributedTaskParameters(
framework_type=args.framework,
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=0 if evaluation_worker else None, # 0 value for evaluation worker as it should run infinitely
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=shared_memory_scratchpad,
seed=args.seed+task_index if args.seed is not None else None, # each worker gets a different seed
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_path=args.checkpoint_restore_dir, # MonitoredTrainingSession only supports a dir
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
if gpu_id is not None:
set_gpu(gpu_id)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
p = Process(target=start_graph, args=(graph_manager, task_parameters))
# p.daemon = True
p.start()
return p
# parameter server
parameter_server = start_distributed_task("ps", 0, gpu_id=CoachLauncher.gpus[0])
# training workers
        # wait a bit before spawning the non-chief workers in order to make sure the session is already created
curr_gpu_idx = 0
workers = []
workers.append(start_distributed_task("worker", 0, gpu_id=CoachLauncher.gpus[curr_gpu_idx]))
time.sleep(2)
for task_index in range(1, args.num_workers):
curr_gpu_idx = (curr_gpu_idx + 1) % len(CoachLauncher.gpus)
workers.append(start_distributed_task("worker", task_index, gpu_id=CoachLauncher.gpus[curr_gpu_idx]))
# evaluation worker
if args.evaluation_worker or args.render:
curr_gpu_idx = (curr_gpu_idx + 1) % len(CoachLauncher.gpus)
evaluation_worker = start_distributed_task("worker", args.num_workers, evaluation_worker=True,
gpu_id=CoachLauncher.gpus[curr_gpu_idx])
# wait for all workers
[w.join() for w in workers]
if args.evaluation_worker:
evaluation_worker.terminate()
parameter_server.terminate()
class CoachInterface(CoachLauncher):
"""
    This class is used as an interface for using Coach as a library. It can take any of the command-line arguments
    (with the respective names) as arguments to the class.
"""
def __init__(self, **kwargs):
parser = self.get_argument_parser()
arguments = []
for key in kwargs:
arguments.append('--' + key)
arguments.append(str(kwargs[key]))
if '--experiment_name' not in arguments:
arguments.append('--experiment_name')
arguments.append('')
self.args = self.get_config_args(parser, arguments)
self.graph_manager = self.get_graph_manager_from_args(self.args)
if self.args.num_workers == 1:
task_parameters = self.create_task_parameters(self.graph_manager, self.args)
self.graph_manager.create_graph(task_parameters)
def run(self):
self.run_graph_manager(self.graph_manager, self.args)
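# A minimal sketch of driving Coach as a library through CoachInterface
# (the preset name is illustrative and must exist under rl_coach/presets):
#
#   coach = CoachInterface(preset='CartPole_DQN', num_workers=1)
#   coach.run()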
def main():
launcher = CoachLauncher()
launcher.launch()
if __name__ == "__main__":
main()
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings,
requires_legacy_unicode_capi, check_sanitizer)
from test.support import (TestFailed,
run_with_locale, cpython_only,
darwin_malloc_err_warning)
from test.support.import_helper import import_fresh_module
from test.support import threading_helper
from test.support import warnings_helper
import random
import inspect
import threading
if sys.platform == 'darwin':
darwin_malloc_err_warning('test_decimal')
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
import decimal as orig_sys_decimal
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
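# For example, the half-way cases differ per mode:
#   P.Decimal('2.5').quantize(P.Decimal('1'), rounding=ROUND_HALF_EVEN) -> Decimal('2')
#   P.Decimal('2.5').quantize(P.Decimal('1'), rounding=ROUND_HALF_UP)   -> Decimal('3')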
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file, encoding="utf-8") as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
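        # e.g. FixQuotes("'don''t'") -> "don't": doubled quotes survive as
        # literals while the surrounding quotes are stripped.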
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# underscores
self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
self.assertEqual(str(Decimal('1_0_0_0')), '1000')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
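        # Why the long digit string above: Decimal(float) converts the exact
        # binary value, and 0.1 is stored as 3602879701896397 / 2**55, whose
        # exact decimal expansion is the 55-digit value asserted above.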
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
        # this method does no whitespace or underscore stripping
self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
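        # The roundtrip works because 17 significant digits are enough to
        # distinguish any two IEEE-754 doubles, so prec=100 loses nothing
        # that float() could detect.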
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
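        # What the loop above exercises: with E on the left, Python calls
        # E.__add__ directly; with Decimal on the left, Decimal.__add__
        # returns NotImplemented for the unknown type and Python falls back
        # to E.__radd__ -- hence the mirrored 'str...' results.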
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
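        # The specs above follow the standard format mini-language, roughly
        # [[fill]align][sign][0][width][,][.precision][type]; for example
        # '#<+10' parses as fill='#', align='<', sign='+', width=10, and
        # '08,' as zero-padding to width 8 with thousands separators.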
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
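        # How 'grouping' is applied (illustrative): groups are consumed from
        # the right, so crazy's [1, 4, 2, CHAR_MAX] splits 1234567890123 into
        # 123456-78-9012-3 -- one digit, then four, then two, then CHAR_MAX
        # stops any further grouping, as the asserts above show.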
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator '
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator '
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
def test_decimal_from_float_argument_type(self):
class A(self.decimal.Decimal):
def __init__(self, a):
self.a_type = type(a)
a = A.from_float(42.5)
self.assertEqual(self.decimal.Decimal, a.a_type)
a = A.from_float(42)
self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
    def test_modulo(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
    def test_floor_div_modulo(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
        # Comparisons involving signaling NaNs signal InvalidOperation.
        # Order comparisons (<, <=, >, >=) involving only quiet NaNs
        # also signal InvalidOperation.
        # Equality comparisons (==, !=) involving only quiet NaNs
        # don't signal, but return False and True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
            for x, y in snan_pairs:
                for op in equality_ops:
                    self.assertRaises(InvalidOperation, op, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following two functions are used by the ThreadingTest class below.
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
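# How the two helpers interlock (sketch): thfunc1 computes test1, signals
# finish1 and then blocks on synchro; thfunc2 sets prec=18 on *its own*
# thread-local context before setting synchro.  thfunc1's second division
# therefore runs after that change yet still sees 24 digits, which is the
# thread-isolation property the asserts at the end of each helper check.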
@threading_helper.requires_working_threading()
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
    # Take care when executing this test from IDLE: a known threading issue
    # can hang IDLE, and its cause has not been tracked down.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
th1.join()
th2.join()
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
decimal = C
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
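        # The last assert holds because the float 0.1 is the nearest binary
        # fraction to 1/10, not 1/10 itself; mixed comparisons use the exact
        # values, so Decimal('0.1') != 0.1.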
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
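        # Both identity checks pass because Decimal instances are immutable:
        # copy.copy and copy.deepcopy may simply return the original object.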
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hash(int(value)))
        # check that the hashes of a Decimal and a float match when they
        # represent exactly the same value
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(d), hash(f))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
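        # The invariants above follow from Python's cross-type hashing rule:
        # objects that compare equal must hash equal, so for example
        # hash(Decimal('2.5')) == hash(2.5) and hash(Decimal(23)) == hash(23),
        # and the hash cannot depend on the context's precision.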
def test_hash_method_nan(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, hash, Decimal('sNaN'))
value = Decimal('NaN')
self.assertEqual(hash(value), object.__hash__(value))
class H:
def __hash__(self):
return 42
class D(Decimal, H):
pass
value = D('NaN')
self.assertEqual(hash(value), object.__hash__(value))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
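        # Two-argument round uses ROUND_HALF_EVEN at the given position,
        # equivalent to quantizing by 1E-n; that's why '123.455' rounds up to
        # '123.46' (preceding digit 5 is odd) while '123.445' rounds down to
        # '123.44' (preceding digit 4 is even).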
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
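        # Concrete instances of the properties checked above (illustrative):
        #   >>> Decimal('0.25').as_integer_ratio()
        #   (1, 4)
        #   >>> Decimal('-2.5E+1').as_integer_ratio()
        #   (-25, 1)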
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
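        # The pattern throughout this test: passing context=None (or
        # rounding=None) means "use the current thread's context", which is
        # why every flag set by the operations above is observed on the
        # local context c rather than on some other context object.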
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
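        # The cross-implementation round trips work because (as far as these
        # tests rely on) both implementations pickle a Decimal via its string
        # form, so whichever module is bound to sys.modules['decimal'] at
        # load time reconstructs an equal value.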
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
@cpython_only
def test_small_ints(self):
Decimal = self.decimal.Decimal
# bpo-46361
for x in range(-5, 257):
self.assertIs(int(Decimal(x)), x)
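        # assertIs (rather than assertEqual) works here because CPython
        # caches the ints in [-5, 256]; bpo-46361 tracked int(Decimal(x))
        # returning a fresh object instead of the cached one.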
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
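        # Passing None for every keyword must be equivalent to passing no
        # arguments at all: both contexts get the default attributes.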
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
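        # The Context method tests below share one pattern: ints are
        # accepted wherever Decimals are, but strings raise TypeError
        # (no implicit conversion from str).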
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
    # Can't do these as docstrings until Python 2.6,
    # as doctest can't handle __future__ statements.
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
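        # The gc tests delete the context-manager targets inside the
        # blocks; restoring the outer context on exit must still work.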
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
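        # The C implementation has no _raise_error(), so set the flag by
        # hand and raise only if the signal is trapped.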
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
        context = Context(prec=9, Emin=-425000000, Emax=425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
                new_flags = [k for k, v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
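            # Invoke the rich comparison; it must either raise the given
            # signal or succeed while setting the FloatOperation flag.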
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
        FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
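        # Mutate the templates, then verify that setcontext() installs
        # copies reflecting the new precision.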
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
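        # Changing DefaultContext.prec must not affect the context that is
        # already active; only a later setcontext(DefaultContext) sees it.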
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
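        # MyContext sets only the attributes that were passed explicitly
        # and accepts flags/traps given as lists of signals.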
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
        # Architecture-dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
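            # With prec=1, the one-digit results below expose the rounding
            # done by the implicit (thread-local) context.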
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
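        # Reflected arithmetic methods must return NotImplemented for
        # unsupported operand types so Python can try the other operand.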
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
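        # For an IEEE 754-2008 interchange format of width k bits:
        #   prec = 9*k//32 - 2,  Emax = 3 * 2**(k//16 + 3),  Emin = 1 - Emax
        # e.g. DECIMAL32: prec = 7, Emax = 96, Emin = -95, as asserted below.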
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
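        # MAX_PREC is 425000000 in the 32-bit configuration and
        # 999999999999999999 in the 64-bit one, so this comparison
        # distinguishes the two builds.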
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c,'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
            self.assertRaises(InvalidOperation, Decimal("1").__round__,
                              -int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
FloatOperation = C.FloatOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
FloatOperation: C.DecFloatOperation,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
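        # A decimal tuple is (sign, digits, exponent); an empty digits
        # tuple denotes a coefficient of zero.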
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
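        # libmpdec stores 19 decimal digits per 8-byte word on 64-bit builds
        # and 9 digits per 4-byte word on 32-bit builds, so one extra word's
        # worth of digits should grow the object by exactly one word.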
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
def test_internal_use_of_overridden_methods(self):
Decimal = C.Decimal
# Unsound subtyping
class X(float):
def as_integer_ratio(self):
return 1
def __abs__(self):
return self
class Y(float):
def __abs__(self):
return [1]*200
class I(int):
def bit_length(self):
return [1]*200
class Z(float):
def as_integer_ratio(self):
return (I(1), I(1))
def __abs__(self):
return self
for cls in X, Y, Z:
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
# Issue 41540:
@unittest.skipIf(sys.platform.startswith("aix"),
"AIX: default ulimit: test is flaky because of extreme over-allocation")
@unittest.skipIf(check_sanitizer(address=True, memory=True),
"ASAN/MSAN sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_maxcontext_exact_arith(self):
# Make sure that exact operations do not raise MemoryError due
# to huge intermediate values when the context precision is very
# large.
# The following functions fill the available precision and are
# therefore not suitable for large precisions (by design of the
# specification).
MaxContextSkip = ['logical_invert', 'next_minus', 'next_plus',
'logical_and', 'logical_or', 'logical_xor',
'next_toward', 'rotate', 'shift']
Decimal = C.Decimal
Context = C.Context
localcontext = C.localcontext
# Here only some functions that are likely candidates for triggering a
# MemoryError are tested. deccheck.py has an exhaustive test.
maxcontext = Context(prec=C.MAX_PREC, Emin=C.MIN_EMIN, Emax=C.MAX_EMAX)
with localcontext(maxcontext):
self.assertEqual(Decimal(0).exp(), 1)
self.assertEqual(Decimal(1).ln(), 0)
self.assertEqual(Decimal(1).log10(), 0)
self.assertEqual(Decimal(10**2).log10(), 2)
self.assertEqual(Decimal(10**223).log10(), 223)
self.assertEqual(Decimal(10**19).logb(), 19)
self.assertEqual(Decimal(4).sqrt(), 2)
self.assertEqual(Decimal("40E9").sqrt(), Decimal('2.0E+5'))
self.assertEqual(divmod(Decimal(10), 3), (3, 1))
self.assertEqual(Decimal(10) // 3, 3)
self.assertEqual(Decimal(4) / 2, 2)
self.assertEqual(Decimal(400) ** -1, Decimal('0.0025'))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
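# (C and Py classes alternate in all_tests, so the [1::2] slice below keeps
# only the Py variants when _decimal is missing.)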
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
    # Dynamically build a custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
    if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
filter.py
|
#! /usr/bin/python3
import errno, socket
import threading
from queue import Queue
import time
import random
_port_from = 27020
_port_to = 27036
_ip = 'csko.cz'
_mod = 'cstrike'
_threads = 10
# lock to serialize console output
lock = threading.Lock()
def prefix():
return "[pFilter message]: "
def parsing(data):
"""Parses recieved data to readable form. Null('\0') is delimiter
Returns list of data."""
data = data.replace(b'\377',b'')
data = data.decode('UTF-8')
li = data.split('\0')
return li
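# Illustration with a hypothetical GoldSrc-style reply (real payloads carry
# more fields): the 0xff header bytes are stripped and the rest is split on
# NUL, e.g.
#   parsing(b'\377\377\377\377maddr\0My Server\0de_dust2\0cstrike\0')
#   -> ['maddr', 'My Server', 'de_dust2', 'cstrike', '']
# pf() below reads the server name from index 1 and the mod from index 3.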
def pf(port, ip, mod):
    osock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        try:
            osock.connect((ip, port))
        except socket.gaierror:
            return ("[error]", "No internet connection")
        osock.settimeout(1.5)
        osock.send(b'\377\377\377\377TSource Engine Query\0')
        while 1:
            data = ''
            try:
                data = osock.recv(1024)
            except socket.error as e:
                if e.errno == errno.ECONNREFUSED:
                    err_str = prefix() + "connection to port %d refused" % port
                    return ("[error]", err_str)
            if not data:
                err_str = prefix() + "no data received from port " + str(port)
                return ("[error]", err_str)
            else:
                li = parsing(data)
                if li[3] != mod:
                    err_str = prefix() + "there's another mod (%s) on port %d!" % (li[3], port)
                    return ("[error]", err_str)
                else:
                    return (port, li[1])
    finally:
        # Close on every exit path; the original close() after the loop was
        # unreachable because every branch returns from inside it.
        osock.close()
def src_query(ip, mod, port_from, port_to, threads=10):
result = []
q = Queue()
# The worker thread pulls an item from the queue and processes it
def worker():
while True:
item = q.get()
# print(*item)
result.append(pf(*item))
q.task_done()
    def solver(data):
        # Separate successful results from errors, resolve the host alias,
        # and return the final (servers, errors, host_alias) tuple.
servers = []
errors = []
for server in data:
if server[0] != "[error]":
servers.append((server[1],server[0]))
else:
errors.append(server[1])
try:
host_alias = socket.gethostbyname(ip)
except socket.gaierror:
host_alias = "unreachable"
return (servers, errors, host_alias)
# Create the queue and thread pool.
for i in range(threads):
t = threading.Thread(target=worker)
t.daemon = True # thread dies when main thread (only non-daemon thread) exits.
t.start()
    # Put work items on the queue (each item is a (port, ip, mod) tuple).
for port in range(port_from,port_to):
q.put((port, ip, mod))
q.join() # block until all tasks are done
return solver(result)
def main():
start = time.perf_counter()
q = src_query(_ip, _mod, _port_from, _port_to, _threads)
print("using func src_query()_____________________________________________")
for s in q[0]:
print("Server: " + s[0].ljust(40, " ") + "Port:" + str(s[1]))
for e in q[1]:
print(e)
    print('time: %f with %d threads' % (time.perf_counter() - start, _threads))
if __name__ == '__main__':
main()
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core import signals
from django.core.cache import (cache, caches, CacheKeyWarning,
InvalidCacheBackendError, DEFAULT_CACHE_ALIAS, get_cache,
close_caches)
from django.db import connection, connections, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import Template
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (TestCase, TransactionTestCase, RequestFactory,
ignore_warnings, override_settings)
from django.test.signals import setting_changed
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
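# For example, custom_key_func('answer', 'prefix', 1) returns
# 'CUSTOM-prefix-1-answer'.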
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
    # `base` is used to pull in the memcached config from the original settings,
    # `params` are test-specific overrides and `_caches_setting_base` is the
    # base config for the tests.
    # This results in the following search order:
    # params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
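# A hypothetical call caches_setting_for_tests(BACKEND='x.y.ZCache') yields,
# for the 'cull' alias: {'BACKEND': 'x.y.ZCache', 'OPTIONS': {'MAX_ENTRIES': 30}}
# -- per-call params win over _caches_setting_base, which wins over `base`.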
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
        # Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with a callable default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
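        # The memcached threshold is 60 * 60 * 24 * 30 = 2592000 seconds;
        # anything larger is read as an absolute Unix timestamp.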
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
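    # With the default CULL_FREQUENCY of 3, exceeding MAX_ENTRIES purges about
    # a third of the entries; CULL_FREQUENCY = 0 empties the cache entirely,
    # so 'zero_cull' retains only the keys set after the last full dump.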
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
        # Mimic a custom ``make_key`` method being defined, since the default
        # one will never trigger the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
        get_cache_data = fetch_middleware.process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
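    """Record whether the LocMem write lock was held during pickling.

    Used below to verify that LocMemCache pickles values outside of its
    lock (refs #20613 / #18541).
    """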
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
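# For reference, a minimal settings entry that would activate these tests
# might look like the following (hypothetical address -- point LOCATION at
# your own server):
#
#     CACHES = {
#         'default': {
#             'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#             'LOCATION': '127.0.0.1:11211',
#         },
#     }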
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
        # The cache LOCATION cannot be modified through override_settings /
        # modify_settings, so settings are manipulated directly here and the
        # setting_changed signal is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(TestCase):
@ignore_warnings(category=RemovedInDjango19Warning)
def test_simple(self):
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
@ignore_warnings(category=RemovedInDjango19Warning)
def test_close_deprecated(self):
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
# Ensure that we don't close the global cache instances.
signals.request_finished.disconnect(close_caches)
try:
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
finally:
signals.request_finished.connect(close_caches)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
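        # The generated key layout is: views.decorators.cache.cache_page.
        # <key_prefix>.<method>.<md5 of the request URL>.<md5 of the Vary
        # header values>; the trailing hash above is the md5 of an empty string.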
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
        # cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the Etag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
self_test.py
|
# -*- coding: utf-8 -*-
"""
This action performs internal hardware & software tests of the system to confirm things work as expected.
Functions with the prefix `test_` are run, and any exception thrown means the test failed.
Outputs from each test are published to MQTT and also reported on the command line.
"""
from __future__ import annotations
from json import dumps
from json import loads
from threading import Thread
from time import sleep
from typing import Callable
from typing import cast
import click
from pioreactor.actions.led_intensity import ALL_LED_CHANNELS
from pioreactor.actions.led_intensity import change_leds_intensities_temporarily
from pioreactor.actions.led_intensity import led_intensity
from pioreactor.background_jobs import stirring
from pioreactor.background_jobs.od_reading import ADCReader
from pioreactor.background_jobs.od_reading import ALL_PD_CHANNELS
from pioreactor.background_jobs.od_reading import IR_keyword
from pioreactor.background_jobs.temperature_control import TemperatureController
from pioreactor.config import config
from pioreactor.hardware import is_HAT_present
from pioreactor.hardware import is_heating_pcb_present
from pioreactor.logging import create_logger
from pioreactor.logging import Logger
from pioreactor.pubsub import publish
from pioreactor.types import LedChannel
from pioreactor.types import PdChannel
from pioreactor.utils import is_pio_job_running
from pioreactor.utils import local_persistant_storage
from pioreactor.utils import publish_ready_to_disconnected_state
from pioreactor.utils.math_helpers import correlation
from pioreactor.whoami import get_latest_experiment_name
from pioreactor.whoami import get_latest_testing_experiment_name
from pioreactor.whoami import get_unit_name
from pioreactor.whoami import is_testing_env
def test_pioreactor_HAT_present(logger: Logger, unit: str, experiment: str) -> None:
assert is_HAT_present()
def test_all_positive_correlations_between_pds_and_leds(
logger: Logger, unit: str, experiment: str
) -> None:
"""
This tests that there is a positive correlation between the IR LED channel, and the photodiodes
as defined in the config.ini.
TODO: if this exits early, we should turn off the LEDs
"""
from pprint import pformat
INTENSITIES = list(
range(10, 50, 5)
) # better to err on the side of MORE samples than less - it's only a few extra seconds...
current_experiment_name = get_latest_experiment_name()
results: dict[tuple[LedChannel, PdChannel], float] = {}
adc_reader = ADCReader(
channels=ALL_PD_CHANNELS,
dynamic_gain=False,
        initial_gain=16,  # I think a small gain is okay, since we're only varying the lower end of LED intensity
fake_data=is_testing_env(),
).setup_adc()
    # set all LEDs to 0, but use the original experiment name, since we really are setting them to 0.
led_intensity(
{channel: 0 for channel in ALL_LED_CHANNELS},
unit=unit,
source_of_event="self_test",
experiment=current_experiment_name,
verbose=False,
)
for led_channel in ALL_LED_CHANNELS:
varying_intensity_results: dict[PdChannel, list[float]] = {
pd_channel: [] for pd_channel in ALL_PD_CHANNELS
}
for intensity in INTENSITIES:
# turn on the LED to set intensity
led_intensity(
{led_channel: intensity},
unit=unit,
experiment=current_experiment_name,
verbose=False,
source_of_event="self_test",
)
# record from ADC, we'll average them
readings1 = adc_reader.take_reading()
readings2 = adc_reader.take_reading()
# Add to accumulating list
for pd_channel in ALL_PD_CHANNELS:
reading = 0.5 * (readings1[pd_channel] + readings2[pd_channel])
varying_intensity_results[pd_channel].append(reading)
# compute the linear correlation between the intensities and observed PD measurements
for pd_channel in ALL_PD_CHANNELS:
measured_correlation = round(
correlation(INTENSITIES, varying_intensity_results[pd_channel]), 2
)
results[(led_channel, pd_channel)] = measured_correlation
logger.debug(f"Corr({led_channel}, {pd_channel}) = {measured_correlation}")
# set back to 0
led_intensity(
{led_channel: 0},
unit=unit,
experiment=current_experiment_name,
verbose=False,
source_of_event="self_test",
)
logger.debug(f"Correlations between LEDs and PD:\n{pformat(results)}")
detected_relationships = []
for (led_channel, pd_channel), measured_correlation in results.items():
if measured_correlation > 0.925:
detected_relationships.append(
(
(config["leds"].get(led_channel) or led_channel),
(config["od_config.photodiode_channel"].get(pd_channel) or pd_channel),
)
)
publish(
f"pioreactor/{unit}/{experiment}/self_test/correlations_between_pds_and_leds",
dumps(detected_relationships),
retain=True,
)
# we require that the IR photodiodes defined in the config have a
# correlation with the IR led
pd_channels_to_test: list[PdChannel] = []
for (channel, angle_or_ref) in config["od_config.photodiode_channel"].items():
if angle_or_ref != "":
channel = cast(PdChannel, channel)
pd_channels_to_test.append(channel)
ir_led_channel = config["leds_reverse"][IR_keyword]
    for ir_pd_channel in pd_channels_to_test:
        assert (
            results[(ir_led_channel, ir_pd_channel)] > 0.925
        ), f"missing {ir_led_channel} ⇝ {ir_pd_channel}, {list(zip(INTENSITIES, varying_intensity_results[ir_pd_channel]))}"
        assert (
            varying_intensity_results[ir_pd_channel][-1] > 1e-4
        ), f"{ir_pd_channel} channel too low: {varying_intensity_results[ir_pd_channel]}"
def test_ambient_light_interference(logger: Logger, unit: str, experiment: str) -> None:
    # Test ambient IR light interference: with all LEDs off, and the Pioreactor not in a sunny room, we should see near-zero light.
adc_reader = ADCReader(
channels=ALL_PD_CHANNELS,
dynamic_gain=False,
initial_gain=16,
fake_data=is_testing_env(),
)
adc_reader.setup_adc()
led_intensity(
{channel: 0 for channel in ALL_LED_CHANNELS},
unit=unit,
source_of_event="self_test",
experiment=experiment,
verbose=False,
)
readings = adc_reader.take_reading()
assert all([readings[pd_channel] < 0.005 for pd_channel in ALL_PD_CHANNELS]), readings
def test_REF_is_lower_than_0_dot_256_volts(logger: Logger, unit: str, experiment: str) -> None:
for (channel, angle_or_ref) in config["od_config.photodiode_channel"].items():
if angle_or_ref == "REF":
reference_channel = cast(PdChannel, channel)
ir_channel = config["leds_reverse"][IR_keyword]
ir_intensity = config.getfloat("od_config", "ir_intensity")
adc_reader = ADCReader(
channels=[reference_channel],
dynamic_gain=False,
initial_gain=1,
fake_data=is_testing_env(),
).setup_adc()
with change_leds_intensities_temporarily(
{ir_channel: ir_intensity},
unit=unit,
source_of_event="self_test",
experiment=experiment,
verbose=False,
):
readings = adc_reader.take_reading()
    # provide a margin, since we also apply margins when determining gain changes in od_reading
assert (
readings[reference_channel] < 0.256 * 0.9
), f"Recorded {readings[reference_channel]} in REF, should be less than 0.256."
def test_detect_heating_pcb(logger: Logger, unit: str, experiment: str) -> None:
assert is_heating_pcb_present()
def test_positive_correlation_between_temperature_and_heating(
logger: Logger, unit: str, experiment: str
) -> None:
assert is_heating_pcb_present()
with TemperatureController("silent", unit=unit, experiment=experiment) as tc:
measured_pcb_temps = []
dcs = list(range(0, 22, 3))
logger.debug("Varying heating.")
for dc in dcs:
tc._update_heater(dc)
sleep(2.0) # two cycles TODO: can I do 1 cycle?
measured_pcb_temps.append(tc.read_external_temperature())
tc._update_heater(0)
measured_correlation = round(correlation(dcs, measured_pcb_temps), 2)
logger.debug(f"Correlation between temp sensor and heating: {measured_correlation}")
assert measured_correlation > 0.9, (dcs, measured_pcb_temps)
def test_positive_correlation_between_rpm_and_stirring(
logger: Logger, unit: str, experiment: str
) -> None:
assert is_heating_pcb_present()
with local_persistant_storage("stirring_calibration") as cache:
if "linear_v1" in cache:
parameters = loads(cache["linear_v1"])
coef = parameters["rpm_coef"]
intercept = parameters["intercept"]
initial_dc = coef * 700 + intercept
else:
initial_dc = config.getfloat("stirring", "initial_duty_cycle")
dcs = []
measured_rpms = []
n_samples = 8
start = initial_dc
end = initial_dc * 0.66
with stirring.Stirrer(
target_rpm=0, unit=unit, experiment=experiment, rpm_calculator=None
) as st, stirring.RpmFromFrequency() as rpm_calc:
rpm_calc.setup()
st.duty_cycle = initial_dc
st.start_stirring()
sleep(1)
for i in range(n_samples):
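            # linearly interpolate the duty cycle from `start` down to `end`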
dc = start * (1 - i / n_samples) + (i / n_samples) * end
st.set_duty_cycle(dc)
sleep(1)
measured_rpms.append(rpm_calc(4))
dcs.append(dc)
measured_correlation = round(correlation(dcs, measured_rpms), 2)
logger.debug(f"Correlation between stirring RPM and duty cycle: {measured_correlation}")
logger.debug(f"{dcs=}, {measured_rpms=}")
assert measured_correlation > 0.9, (dcs, measured_rpms)
HEATING_TESTS = [
test_detect_heating_pcb,
test_positive_correlation_between_temperature_and_heating,
]
STIRRING_TESTS = [test_positive_correlation_between_rpm_and_stirring]
OD_TESTS = [
test_pioreactor_HAT_present,
test_all_positive_correlations_between_pds_and_leds,
test_ambient_light_interference,
test_REF_is_lower_than_0_dot_256_volts,
]
class SummableList(list):
def __add__(self, other) -> SummableList:
return SummableList([s + o for (s, o) in zip(self, other)])
def __iadd__(self, other) -> SummableList:
return self + other
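# Minimal usage sketch (added for illustration): SummableList sums elementwise,
# so the [tested, passed] pair from each BatchTestRunner combines with plain `+`
# in click_self_test below.
def _summable_list_sketch() -> SummableList:
    return SummableList([2, 1]) + SummableList([3, 3])  # -> [5, 4]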
class BatchTestRunner:
def __init__(self, tests_to_run: list[Callable], *test_func_args):
self.count_tested = 0
self.count_passed = 0
self.tests_to_run = tests_to_run
self._thread = Thread(target=self._run, args=test_func_args, daemon=True)
def start(self):
self._thread.start()
return self
def collect(self) -> SummableList:
self._thread.join()
return SummableList([self.count_tested, self.count_passed])
def _run(self, logger, unit, experiment_name):
for test in self.tests_to_run:
test_name = test.__name__
try:
test(logger, unit, experiment_name)
except Exception:
import traceback
traceback.print_exc()
res = False
else:
res = True
logger.debug(f"{test_name}: {'✅' if res else '❌'}")
self.count_tested += 1
self.count_passed += int(res)
publish(
f"pioreactor/{unit}/{experiment_name}/self_test/{test_name}",
int(res),
retain=True,
)
@click.command(name="self_test")
@click.option("-k", help="see pytest's -k argument", type=str, default="")
def click_self_test(k: str) -> int:
"""
Test the input/output in the Pioreactor
"""
import sys
unit = get_unit_name()
testing_experiment = get_latest_testing_experiment_name()
experiment = get_latest_experiment_name()
logger = create_logger("self_test", unit=unit, experiment=experiment)
with publish_ready_to_disconnected_state(unit, testing_experiment, "self_test"):
if is_pio_job_running("od_reading", "temperature_automation", "stirring"):
logger.error(
"Make sure Optical Density, Temperature Automation, and Stirring are off before running a self test. Exiting."
)
return 1
        # flicker to help the user confirm they are testing the right pioreactor.
publish(f"pioreactor/{unit}/{experiment}/monitor/flicker_led_response_okay", 1)
# automagically finds the test_ functions.
functions_to_test = {
f
for (name, f) in vars(sys.modules[__name__]).items()
if name.startswith("test_") and (k in name)
}
# and clear the mqtt cache first
for f in functions_to_test:
publish(
f"pioreactor/{unit}/{testing_experiment}/self_test/{f.__name__}",
None,
retain=True,
)
# run in parallel
test_args = (logger, unit, testing_experiment)
ODTests = BatchTestRunner(
[f for f in OD_TESTS if f in functions_to_test], *test_args
).start()
HeatingTests = BatchTestRunner(
[f for f in HEATING_TESTS if f in functions_to_test], *test_args
).start()
StirringTests = BatchTestRunner(
[f for f in STIRRING_TESTS if f in functions_to_test], *test_args
).start()
count_tested, count_passed = (
ODTests.collect() + HeatingTests.collect() + StirringTests.collect()
)
count_failures = count_tested - count_passed
publish(
f"pioreactor/{unit}/{testing_experiment}/self_test/all_tests_passed",
int(count_failures == 0),
retain=True,
)
if count_tested == 0:
logger.info("No tests ran 🟡")
elif count_failures == 0:
logger.info("All tests passed ✅")
elif count_failures > 0:
logger.info(f"{count_failures} failed test{'s' if count_failures > 1 else ''} ❌")
return int(count_failures > 0)
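# Usage sketch (assuming the standard `pio` entry point registers this command):
#   pio run self_test          # run every self test
#   pio run self_test -k od    # run only tests whose name contains "od"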
|
task.py
|
from __future__ import absolute_import, unicode_literals
import logging
import functools
import threading
import ast
import time
from django.http import HttpResponse
from libs import send_email, util
from libs import call_inception
from .models import (
Usermessage,
DatabaseList,
Account,
globalpermissions,
SqlOrder,
SqlRecord,
grained
)
CUSTOM_ERROR = logging.getLogger('Yearning.core.views')
def set_auth_group(user):
perm = {
'ddl': '0',
'ddlcon': [],
'dml': '0',
'dmlcon': [],
'dic': '0',
'diccon': [],
'dicedit': '0',
'user': '0',
'base': '0',
'dicexport': '0',
'person': [],
'query': '0',
'querycon': []
}
group = Account.objects.filter(username=user).first()
group_list = str(group.auth_group).split(',')
for group_name in group_list:
auth = grained.objects.filter(username=group_name).first()
if auth is not None:
for k, v in perm.items():
if isinstance(v, list):
v = list(set(v) | set(auth.permissions[k]))
elif v == '0':
v = auth.permissions[k]
perm[k] = v
return perm
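# Illustrative sketch (hypothetical data, added for clarity): permissions from a
# user's groups are merged by set_auth_group -- list-valued entries are unioned,
# and '0' flags are replaced by the first group that grants them.
def _set_auth_group_merge_sketch() -> dict:
    perm = {'ddl': '0', 'ddlcon': []}
    for group_perm in ({'ddl': '1', 'ddlcon': [1, 2]}, {'ddl': '0', 'ddlcon': [2, 3]}):
        for k, v in perm.items():
            if isinstance(v, list):
                v = list(set(v) | set(group_perm[k]))
            elif v == '0':
                v = group_perm[k]
            perm[k] = v
    return perm  # -> {'ddl': '1', 'ddlcon': [1, 2, 3]} (list order may vary)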
def grained_permissions(func):
    '''
    :argument Decorator that checks fine-grained permissions. Unauthorized requests return 401 directly, leaving the frontend to act on the status code
    '''
@functools.wraps(func)
def wrapper(self, request, args=None):
if request.method == "PUT" and args != 'connection':
return func(self, request, args)
else:
if request.method == "GET":
permissions_type = request.GET.get('permissions_type')
else:
permissions_type = request.data['permissions_type']
if permissions_type == 'own_space' or permissions_type == 'query':
return func(self, request, args)
else:
group = set_auth_group(request.user)
if group is not None and group[permissions_type] == '1':
return func(self, request, args)
else:
return HttpResponse(status=401)
return wrapper
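# Usage sketch (hypothetical view, added for illustration): the decorator wraps
# class-based view handlers and expects a `permissions_type` field in the query
# string (GET) or in request.data (other methods).
def _grained_permissions_usage_sketch():
    class _DemoView:
        @grained_permissions
        def post(self, request, args=None):
            return HttpResponse(status=200)
    return _DemoView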
class order_push_message(threading.Thread):
    '''
    :argument Called when a work order is approved for execution; processes the data asynchronously
    '''
def __init__(self, addr_ip, id, from_user, to_user):
super().__init__()
self.id = id
self.addr_ip = addr_ip
self.order = SqlOrder.objects.filter(id=id).first()
self.from_user = from_user
self.to_user = to_user
        self.title = f'Work order {self.order.work_id}: review approved'
def run(self):
self.execute()
self.agreed()
def execute(self):
        '''
        :argument Submit the SQL to Inception for execution, write the returned results into the SqlRecord table, and finally update the order's status in the SqlOrder table
        :param
            self.order
            self.id
        :return: none
        '''
time.sleep(self.order.delay * 60)
try:
detail = DatabaseList.objects.filter(id=self.order.bundle_id).first()
with call_inception.Inception(
LoginDic={
'host': detail.ip,
'user': detail.username,
'password': detail.password,
'db': self.order.basename,
'port': detail.port
}
) as f:
res = f.Execute(sql=self.order.sql, backup=self.order.backup)
for i in res:
if i['errlevel'] != 0:
SqlOrder.objects.filter(work_id=self.order.work_id).update(status=4)
SqlRecord.objects.get_or_create(
state=i['stagestatus'],
sql=i['sql'],
error=i['errormessage'],
workid=self.order.work_id,
affectrow=i['affected_rows'],
sequence=i['sequence'],
execute_time=i['execute_time'],
SQLSHA1=i['SQLSHA1'],
backup_dbname=i['backup_dbname']
)
except Exception as e:
            CUSTOM_ERROR.error(f'{e.__class__.__name__}--order execution failed: {e}')
finally:
status = SqlOrder.objects.filter(work_id=self.order.work_id).first()
if status.status != 4:
SqlOrder.objects.filter(id=self.id).update(status=1)
def agreed(self):
        '''
        :argument Send the execution result via in-app message, email and DingTalk
        :param self.from_user
               self.to_user
               self.title
               self.order
               self.addr_ip
        :return: none
        '''
        # starting a thread here and joining it immediately is equivalent to a direct call
        self.con_close()
def con_close(self):
Usermessage.objects.get_or_create(
from_user=self.from_user, time=util.date(),
            title=self.title, content='This work order has been approved!', to_user=self.to_user,
state='unread'
)
content = DatabaseList.objects.filter(id=self.order.bundle_id).first()
mail = Account.objects.filter(username=self.to_user).first()
tag = globalpermissions.objects.filter(authorization='global').first()
if tag.message['ding']:
try:
util.dingding(
                    content='Work order execution notice\nWork order ID: %s\nRequester: %s\nAddress: %s\nOrder note: %s\nStatus: executed\nRemark: %s'
                    % (
                        self.order.work_id, self.order.username, self.addr_ip, self.order.text,
                        content.after),
                    url=ding_url())
            except Exception as e:
                CUSTOM_ERROR.error(f'{e.__class__.__name__}--DingTalk push failed: {e}')
if tag.message['mail']:
try:
if mail.email:
mess_info = {
'workid': self.order.work_id,
'to_user': self.order.username,
'addr': self.addr_ip,
'text': self.order.text,
'note': content.after}
put_mess = send_email.send_email(to_addr=mail.email)
put_mess.send_mail(mail_data=mess_info, type=0)
except Exception as e:
                CUSTOM_ERROR.error(f'{e.__class__.__name__}--email push failed: {e}')
class rejected_push_messages(threading.Thread):
    '''
    :argument Called when a work order is rejected; processes the notifications asynchronously
    '''
def __init__(self, _tmpData, to_user, addr_ip, text):
super().__init__()
self.to_user = to_user
self._tmpData = _tmpData
self.addr_ip = addr_ip
self.text = text
def run(self):
self.execute()
def execute(self):
        '''
        :argument Update the order's status in the SqlOrder table
        :param
            self._tmpData
            self.addr_ip
            self.text
            self.to_user
        :return: none
        '''
mail = Account.objects.filter(username=self.to_user).first()
tag = globalpermissions.objects.filter(authorization='global').first()
if tag.message['ding']:
try:
util.dingding(
                    content='Work order rejection notice\nWork order ID: %s\nRequester: %s\nAddress: %s\nRejection note: %s\nStatus: rejected'
                    % (self._tmpData['work_id'], self.to_user, self.addr_ip, self.text), url=ding_url())
            except Exception as e:
                CUSTOM_ERROR.error(f'{e.__class__.__name__}--DingTalk push failed: {e}')
if tag.message['mail']:
try:
if mail.email:
mess_info = {
'workid': self._tmpData['work_id'],
'to_user': self.to_user,
'addr': self.addr_ip,
'rejected': self.text}
put_mess = send_email.send_email(to_addr=mail.email)
put_mess.send_mail(mail_data=mess_info, type=1)
except Exception as e:
                CUSTOM_ERROR.error(f'{e.__class__.__name__}--email push failed: {e}')
class submit_push_messages(threading.Thread):
    '''
    :argument Called when a work order is submitted; processes the notifications asynchronously
    '''
def __init__(self, workId, user, addr_ip, text, assigned, id):
super().__init__()
self.workId = workId
self.user = user
self.addr_ip = addr_ip
self.text = text
self.assigned = assigned
self.id = id
def run(self):
self.submit()
def submit(self):
        '''
        :argument Update the order's status in the SqlOrder table
        :param
            self.workId
            self.user
            self.addr_ip
            self.text
            self.assigned
            self.id
        :return: none
        '''
content = DatabaseList.objects.filter(id=self.id).first()
mail = Account.objects.filter(username=self.assigned).first()
tag = globalpermissions.objects.filter(authorization='global').first()
if tag.message['ding']:
try:
util.dingding(
                    content='Work order submission notice\nWork order ID: %s\nRequester: %s\nAddress: %s\nOrder description: %s\nStatus: submitted\nRemark: %s'
                    % (self.workId, self.user, self.addr_ip, self.text, content.before), url=ding_url())
            except Exception as e:
                CUSTOM_ERROR.error(f'{e.__class__.__name__}--DingTalk push failed: {e}')
if tag.message['mail']:
if mail.email:
mess_info = {
'workid': self.workId,
'to_user': self.user,
'addr': self.addr_ip,
'text': self.text,
'note': content.before}
try:
put_mess = send_email.send_email(to_addr=mail.email)
put_mess.send_mail(mail_data=mess_info, type=99)
except Exception as e:
                    CUSTOM_ERROR.error(f'{e.__class__.__name__}--email push failed: {e}')
def ding_url():
un_init = util.init_conf()
webhook = ast.literal_eval(un_init['message'])
return webhook['webhook']
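# Sketch of the config shape ding_url() expects (an assumption inferred from the
# ast.literal_eval call above): util.init_conf() returns a mapping whose
# 'message' entry is a dict literal string holding the webhook URL.
def _ding_url_sketch() -> str:
    un_init = {'message': "{'webhook': 'https://oapi.dingtalk.com/robot/send?access_token=TOKEN'}"}
    return ast.literal_eval(un_init['message'])['webhook']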
|
server.py
|
"""
This code is taken from PyPi stream_service package
https://github.com/BR1py/stream_service
Stream Buffer Server main module
The naming in this module is always from the perspective of the server!
client writer -> transport data to the client
client reader -> read data from the client
The address_size can be defined in the server. It gives the number of bytes used for all kinds of addressing:
server_ids
channel_ids
client_ids
rpc_command_ids
data_package counter
transaction_ids
...
The packages exchanged have the following form
1. server_address for mirror servers (=0 if no server function should be mirrored)
2. channel_id (=0 cannot be used directly; this is the server channel, which is used for channel control, server control and rpc calls)
3. type byte: byte contains container type and code information
4. data package counter
5. rest of the package is the payload
# if container type is command
4. command id
5. rest are the command arguments
About RPC calls
the rpc calls are handled via server channel 0
Each client can call an exposed rpc function of another client here.
All calls are asynchronous, which means that when the call request is sent a transaction id is returned.
After the call has been executed the return value is delivered to the client together with the related transaction id.
The client object itself handles async and sync calls; for sync calls the Client blocks execution and
waits for the return with the matching transaction id.
"""
from __future__ import absolute_import
import asyncio
import traceback
from threading import Lock
from multiprocessing import Process
from asyncio import StreamReader, StreamWriter, QueueFull
import uuid
import time
import threading
from .lib import *
from .lib.frame import *
from .lib.buffer import *
# from .stream_commands import *
DEBUG = False  # Variable used for development purposes (more print output is given)
# curiously this leads to an exception when set -> must investigate
__package__ = 'stream_service'
__version__ = '0.1.0'
__licence__ = 'MIT'
__author__ = 'B.R.'
__url__ = 'https://github.com/BR1py/stream_services'
__description__ = 'Client/Server implementation for data streaming in channels based on python >= 3.5'
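# Minimal sketch (an assumption based only on the frame layout listed in the
# module docstring; the real encoding lives in .lib.frame) of how a plain data
# frame could be assembled for address_size = 2:
def _frame_layout_sketch(channel_id: int, counter: int, payload: bytes) -> bytes:
    address_size = 2
    server_address = (0).to_bytes(address_size, 'big')  # 0 -> no mirroring
    type_byte = bytes([0b0001])                         # container type + code info
    return (server_address
            + channel_id.to_bytes(address_size, 'big')
            + type_byte
            + counter.to_bytes(address_size, 'big')
            + payload)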
# helper classes for the server:
class ServeClient():
__slots__ = ('_peername', '_id', '_name', '_control_key', '_async_reader', '_async_writer', '_rpc_methods_descs',
'_read_task', '_write_task', '_owned_chl_ids', '_write_chls', '_read_bufs', '_alive', '_rpc_methods',
'_send_cnt', '_id_bytes')
def __init__(self, peername, client_id, id_bytes, name, control_key, async_reader, async_writer, rpc_methods_descs):
"""
        Object that contains all client related information for each client connected to the server
        :param peername: Address of the client host
        :param client_id: unique id of the client
        :param id_bytes: client id already converted to bytes
        :param name: name of the client
        :param control_key: authentication key to delete the client
        :param async_reader: asyncio StreamReader of the client (communication channel from the client to the server)
        :param async_writer: asyncio StreamWriter of the client (communication channel from the server to the client)
        :param rpc_methods_descs: rpc method description of the client (to be shared with other clients)
"""
self._peername = peername
self._id = client_id
self._name = name
self._control_key = control_key
self._id_bytes = id_bytes
self._async_reader = async_reader
self._async_writer = async_writer
self._rpc_methods_descs = rpc_methods_descs
self._read_task = None # here we store the client read task
        self._owned_chl_ids = set()  # ids of the ServeChannel objects owned by this client
self._write_chls = {} # ServeChannel writers subscribed by the client
self._read_bufs = set() # ReadBuffer for the ServeChannels the client subscribed for reading
self._alive = True # alive flag for the _read_task
self._send_cnt = 0 # counter for statistics for the send frames
# make some private elements available
@property
def id(self):
return self._id
@property
def id_bytes(self):
return self._id_bytes
@property
def name(self):
return self._name
@property
def is_alive(self):
return self._alive
@property
def writer(self):
return self._async_writer
@property
def control_key(self):
return self._control_key
def stop(self):
"""
stop the read_task by switching the alive flag to False
        (The task will stop after the timeout of the read function is reached or when a read item is received)
"""
self._alive = False
def register_owned_chl(self, chl):
"""
Helper to register a new owned channel (used in the rpc_create_new_channel() method of the server)
(An owned channel is automatically appended as a writer channel)
        :param chl: ServeChannel object that is owned by the client
"""
chl_id = chl.id
self._owned_chl_ids.add(chl_id)
self._write_chls[chl_id] = chl
self._write_chls[chl.name] = chl
def set_read_task(self, task):
"""
        Helper method to set the read task (used in the client_connect() method of the server)
        :param task: the asyncio read task for this client
:return:
"""
self._read_task = task
def set_send_cnt(self, cnt):
"""
helper to set the actual send count (used by the reader task)
:param cnt: new counter value
"""
self._send_cnt = cnt
def get_async_stream_objects(self):
"""
        Helper to get the client's asyncio StreamReader and StreamWriter
:return: StreamReader,StreamWriter
"""
return self._async_reader, self._async_writer
def get_rpc_method_desc(self):
"""
get the rpc method description of the client
:return:
"""
return self._rpc_methods_descs
class ServeChannel():
__slots__ = ('_parent', '_id', '_name', '_owner', '_is_public', '_is_single_in', '_lifetime',
'_read_bufs', '_control_key', '_transaction_ids', '_address_mask', '_read_writers',
'_channel_task', '_task_trigger', '_task_alive', '_loop', '_slot_time', '_write_cnt',
'_next_send_time', '_is_slot_time_fixed', '_address_size', '_statistic_chl', '_event_cnt',
'_statistic_chl_fill_rate', '_writer_clients', '_ta_ids_hdl')
def __init__(self, chl_id, name, owner, is_public, is_single_in, lifetime, loop, address_size,
fixed_slot_time=None):
"""
        This class represents a channel on the server; it contains its own task to send the data to the subscribers.
        It's the most important class in the server!
        :param chl_id: channel id
        :param name: channel name
        :param owner: channel owner (the client that created the channel; this client must also delete it)
        :param is_public: Will the channel be published by the server (server gives an info list if requested)
        :param is_single_in: Only one writer is allowed for this channel
        :param lifetime: temporary channels can be created too; they will be eliminated after lifetime (in seconds)
:param loop: event loop of the server
:param address_size: address size used on this server
:param fixed_slot_time: channel has a fixed slot time (load balancing disabled for this channel)
"""
self._id = chl_id
self._name = name
self._owner = owner
self._is_public = is_public
self._is_single_in = is_single_in
self._lifetime = lifetime
self._address_size = address_size
# Create a mask based on address_size:
mask = 0
for i in range(address_size):
mask = (mask << 8) + 0xFF
self._address_mask = mask
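        # (the loop above is equivalent to self._address_mask = (1 << (8 * address_size)) - 1)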
        self._control_key = uuid.uuid4().bytes  # needed for channel delete
        self._ta_ids_hdl = IdsHandler(address_size)  # generator for transaction ids used on this channel
self._read_bufs = {} # subscribed read buffers
self._writer_clients = {} # subscribed writers
        self._task_alive = True  # alive flag for the channel write task; set to False to stop the task
        self._loop = loop  # event loop that should be used (the one from the server)
        self._task_trigger = asyncio.Event(loop=loop)  # activity trigger, set by the client reader tasks to wake the channel writer task
self._channel_task = self._loop.create_task(self._write_task()) # writer task is started here
        if fixed_slot_time is None:  # a fixed slot time can be set per channel; if not, the slot time is set depending on the number of channels on the server
            self._slot_time = 2  # default is 2 seconds
            self._is_slot_time_fixed = False
        else:
            self._slot_time = fixed_slot_time  # fixed slot time given by the caller
            self._is_slot_time_fixed = True
self._next_send_time = 0 # timing used for load balancing
self._write_cnt = 0 # statistics counter
self._event_cnt = 0 # statistics counter
        self._statistic_chl = None  # statistics channel (if set, statistic data from this channel is put into that channel)
        self._statistic_chl_fill_rate = 1  # helper for the statistics channel (gives the fill rate for this channel)
# publish some internals
@property
def is_slot_time_fixed(self):
return self._is_slot_time_fixed
@property
def write_cnt(self):
return self._write_cnt
@property
def last_send_time(self):
return (self._next_send_time - self._slot_time)
@property
def control_key(self):
return self._control_key
@property
def name(self):
return self._name
@property
def id(self):
return self._id
@property
def is_single_in(self):
return self._is_single_in
def add_client_writer(self, client):
"""
add a new writer to the channel
:param client: writer client
:return:
"""
self._writer_clients[client.id] = client
def remove_client_writer(self, client_id):
"""
remove a writer from the channel
:param client_id: client id that should be removed
:return:
"""
if client_id in self._writer_clients:
del self._writer_clients[client_id]
return True
return False
def add_client_reader(self, buffer):
'''
add a given read buffer to the channel
        :param buffer: Buffer() object (we expect that the additional kwargs in the object are set as expected ->
                       this is normally ensured in the rpc method of the server)
:return:
'''
client_id = buffer.client.id
if client_id in self._read_bufs:
raise StreamError_Channel(
'Given client_id: %s exists already as reader in the channel, delete first!' % repr(client_id))
if len(self._read_bufs) == 0:
self._notify_active(True)
self._read_bufs[client_id] = buffer
def remove_client_reader(self, client_id):
"""
remove a client reader from the channel
:param client_id:
:return:
"""
        if client_id not in self._read_bufs:
            raise StreamError_Channel(
                'Given client_id: %s not found in channel readers, cannot delete!' % repr(client_id))
read_buffer = self._read_bufs.pop(client_id)
# we clear the buffer and we send as last item an end data object
read_buffer.clear()
header = FrameHeader(CT_DATA_END, CODE_MARSHAL, address_size=self._address_size)
read_buffer.put_nowait(header, b'END DATA')
if len(self._read_bufs) == 0:
self._notify_active(False)
return read_buffer
def _notify_active(self, active=True):
"""
        Signal whether put_data activity is needed for this channel.
        To avoid useless traffic on the network the writers for this channel can be deactivated
        in case no subscriber listens to the channel
        :param active: True - activity on, False - activity off
:return:
"""
for client in self._writer_clients.values():
frame = Frame(FrameHeader(CT_CMD, CODE_MARSHAL, command_id=1,
# rpc_command for switch the activity on clients must have id==1 !
client_id=client.id), self.id, active)
task = asyncio.run_coroutine_threadsafe(send_msg(client._async_writer, frame.encode()), self._loop)
task.result()
def set_slot_time(self, slot_time):
"""
slot time can be adapted in case of server load or to prioritize a channel
:param slot_time: slot time for sending
:return:
"""
if not self._is_slot_time_fixed:
self._slot_time = slot_time
def set_statistic_channel(self, chl, fill_rate):
"""
        The statistics channel can be activated on the server to track the load;
        with this method the channel is set and activated for this channel
:param chl: statistics channel
:param fill_rate: fill_rate for the statistic channel
:return:
"""
self._statistic_chl = chl
self._statistic_chl_fill_rate = fill_rate
def stop_channel(self):
"""
Will stop the channel read task (cannot be restarted!)
:return:
"""
self._task_alive = False
self._task_trigger.set()
async def _write_task(self):
"""
        This task ensures that the data is sent only in the given time slot.
        Therefore we do not send directly; we buffer in queues.
"""
# prepare some locals for quicker access
# (The task should run as quick as possible to reduce the load on the server)
read_buffers = self._read_bufs
buf_infos = []
statistics_channel_id = None
statistic_header = None
statistics_channel_fill_rate = 1
last_statistics_event_cnt = -1
while self._task_alive: # Main write loop
            # We wait for a trigger event signaling that we have new data
await self._task_trigger.wait()
# clear the event to be usable again
self._task_trigger.clear()
# print('Trigger received')
await asyncio.sleep(0.001) # give other tasks a slot
if self._statistic_chl is not None:
# if statistics channel is active we measure some buffer statistics before we put items out
buf_infos = [(buf.client.name, buf.qsize(), buf.full_cnt, buf.fill_cnt) for k, buf in
self._read_bufs.items() if
type(k) is int]
statistics_active = True
else:
statistics_active = False
# init some variables for break conditions
cnt = 0
t = self._loop.time()
end_time = t + self._slot_time
# take data from the buffers and send them to the clients:
while self._task_alive:
all_empty = True
for buffer in list(read_buffers.values()):
if not buffer.empty():
await asyncio.sleep(0.001) # give other tasks a slot
                        tx_data = await buffer.get()  # take buffer data out (we might use get_nowait here too, but then we must handle the exception)
buffer.task_done() # we free the queue for next operation
await asyncio.sleep(0.001) # give other tasks a slot
await self._send_encoded(buffer.client._async_writer, tx_data) # send data to the client
# print('Send to client',tx_data)
cnt = (cnt + 1) & 0b11111111
all_empty = False
# check break conditions:
# we break earliest after sending at least one item per read_buffer
                if self._loop.time() > end_time:
                    # time slot over
self._write_cnt = cnt
self._next_send_time = self._loop.time() + self._slot_time
await asyncio.sleep(0.001)
# set trigger (we are not finished)
self._task_trigger.set()
break
if all_empty:
# all read buffers are empty -> Fine no data lost in this case!
self._write_cnt = cnt
self._next_send_time = self._loop.time() + self._slot_time
await asyncio.sleep(0.1) # here we wait a bit longer (the buffers are well cleaned)
break
                if statistics_active:
                    # if the statistics channel is active we send statistics data
# this will slow down the task!
                    if last_statistics_event_cnt != cnt:
try:
# We put this in try except because in a corner-case the statistic channel might be set to None
# during this operation!
chl_id = self._statistic_chl.id
if statistics_channel_id != chl_id:
# channel changed!
# create new header
statistic_header = FrameHeader(CT_DATA, CODE_MARSHAL, channel_id=chl_id)
statistics_channel_id = chl_id
if statistics_channel_id == self.id:
# we do not use a fill_rate on the statistics channel itself
statistics_channel_fill_rate = 1
else:
statistics_channel_fill_rate = self._statistic_chl_fill_rate
if not cnt % statistics_channel_fill_rate:
info = '%f %s(%s): send_count: %i; event_count: %i; buffer sizes/full_cnt/fill_cnt: %s\n' % (
t, self._name, self._id, cnt, self._event_cnt, repr(buf_infos))
frame = Frame(statistic_header, info)
self._statistic_chl.put_nowait(CT_DATA, frame.encode())
last_statistics_event_cnt = cnt
except:
pass
def get_new_transaction_id(self):
"""
        We do not have transaction objects on the server like on the clients, but we use transaction ids in some cases.
        Here we can create unique ids.
        Will raise an IndexError (or ValueError) in case no more ids are available
:return:
"""
return self._ta_ids_hdl.get_new_id()
def clear_buffers(self):
"""
clear all the buffers
:return:
"""
for read_buffer in list(self._read_bufs.values()):
read_buffer.clear()
def put_nowait(self, container_type, data):
"""
put data function used by the client writers
Inside we distribute the data to the buffers
:param container_type: This info is already parsed by the reader thread and we reuse it here
:param data: raw data received for this channel
:return:
"""
force = False
if container_type == CT_DATA_END:
            # DATA END RECEIVED!
            # In case a data end is received the transaction id must be freed for reuse
            header = FrameHeader(CT_DECODE, data, self._address_size)
            self._ta_ids_hdl.free_id(header.transaction_id)
            force = True
        self._event_cnt = (self._event_cnt + 1) & 0xFFFFFFFF  # statistics counter
for read_buf in list(self._read_bufs.values()): # put data into the buffers
            read_buf.put_nowait(data, force=force)
if self._loop.time() > self._next_send_time:
            # we set the task trigger event to signal the send task that we have new data to send
self._task_trigger.set()
async def put(self, container_type, data):
"""
        async version of put_nowait, but this is not used at the moment
:param container_type:
:param data:
:return:
"""
if container_type == CT_DATA_END:
            # DATA END RECEIVED!
header = FrameHeader(CT_DECODE, data, self._address_size)
self._ta_ids_hdl.free_id(header.transaction_id)
for read_buf in list(self._read_bufs.values()):
await read_buf.put(data)
if self._loop.time() > self._next_send_time:
self._task_trigger.set()
async def _send_encoded(self, writer, tx_data):
'''
        helper method to encode the data before sending;
        the encoding depends on the type of tx_data given
        :param writer: writer
        :param tx_data: tx_data can be bytes, a (header, payload) tuple, or a Frame() object
:return:
'''
if type(tx_data) is tuple: # we have header object and payload!
await send_msg(writer, tx_data[0].encode() + tx_data[1])
elif type(tx_data) is bytes: # we have raw bytes
await send_msg(writer, tx_data)
else: # we have a tx_frame
await send_msg(writer, tx_data.encode())
# Server:
class StreamChannelServer_Process(Process):
def __init__(self, ip_address=None, port=None, ssl=None,
address_size=2, log_file=None, quiet=False, name='StreamDataServer',
client_authenticate=b'STRM_Client',
# scale and performance parameters
max_clients=100,
max_slottime=30,
min_slottime=2,
default_limit=2 ** 16,
statistic_channel_name='ServerChannelStatistics',
                 daemon=False, **kwargs):
"""
Server Process - main stream service server class
Important parts of the class init are executed in the moment the process is started (see run() method)
:param ip_address: network IP address that will be used by the server
:param port: network port that will be used by the server
        :param ssl: optional ssl configuration (search for ssl on asyncio servers for more details)
:param address_size: address_size used on the server default is 2 bytes
e.g. if you need more channels this can be extended but the frame size increases in this case!
        :param log_file: Use a log file to store the log messages of the server (the file will be limited in size)
:param quiet: run in quiet mode (no log outputs printed into the terminal)
:param name: name of the server
:param client_authenticate: authentication bytes for clients (need for connections)
:param max_clients: maximum number of clients accepted by the server
:param max_slottime: maximum slot time might be increased depending on the machine the server is running on;
Used for load balancing, it's the upper limit for the slot-time a channel has to send data
and it is divided by the number of channels on the server
(normally the parameter should be kept as it is)
:param min_slottime: minimum slot time might be increased depending on the machine the server is running on
Used for load balancing; if a lot of channels running on the server the slot-time
for writing is fixed at this value
                             (normally the parameter should be kept as it is)
:param default_limit: This is the size of the StreamWriter buffer (normally this should not be changed)
:param statistic_channel_name: name of the statistic channel
:param daemon: run the process in daemon mode
:param kwargs: additional arguments to give to the asyncio server
"""
if port is None:
port = DEFAULT_BUFFER_STREAM_MGR_PORT
self._address = (ip_address, port)
        self._kwargs = kwargs
self._address_size = address_size
self._max_slottime = max_slottime
self._min_slottime = min_slottime
self._default_limit = default_limit
self._current_max_writer_size = self._default_limit
self._current_slot_delta_time = self._max_slottime
self._ssl = ssl
self._statistic_channel_name = statistic_channel_name
self._statistic_chl = None
self._statistic_chl_fill_rate = 1
mask = 0
for i in range(address_size):
mask = (mask << 8) + 0xFF
self._address_mask = mask
        # initialize the Process base class
super().__init__(name=name, daemon=daemon)
        self._srv_lock = None  # lock must be instantiated in run()
self._server_ready = False
self._server_active = True
self._logger = Logger(log_file=log_file, quiet=quiet)
self._write_log = self._logger.write_log
if type(client_authenticate) is not bytes:
raise TypeError('Given client_authenticate parameter must be of type bytes')
self._client_authenticate = client_authenticate
self._clients = {}
self._max_clients = max_clients
self._channels = {}
# the following two items are not used in the channels only on the server for client rpc calls
        self._server_transactions = {}
self._server_transaction_id_hdl = IdsHandler(address_size)
# id handlers
self._client_id_hdl = IdsHandler(address_size)
self._channel_id_hdl = IdsHandler(address_size)
self._rpc_methods_descs = []
self._rpc_methods = []
@property
def address_size(self):
return self._address_size
def run(self):
try:
# post initialization:
self._write_log('Start StreamChannelServer (Version %s)' % __version__)
self._srv_lock = Lock()
            self._rpc_methods_descs, self._rpc_methods = rpc.RPCFactory.class_init_rpc_functions(self, first_items=[
                'rpc_authenticate_client'], address_size=self._address_size)
#print(self._rpc_methods_descs)
#Event loop
self._loop = loop = asyncio.get_event_loop()
#self._loop.set_debug(DEBUG)
#prepare the asyncio server:
            co_rtn = asyncio.start_server(self.client_connect, host=self._address[0], port=self._address[1], loop=loop,
                                          limit=self._default_limit, ssl=self._ssl, **self._kwargs)
self._server = loop.run_until_complete(co_rtn) # this will be executed in the moment the event loop is started!
# Serve requests until Ctrl+C is pressed
self._write_log('Serving on {}'.format(self._server.sockets[0].getsockname()))
self._server_ready = True
# start the event loop and run for ever!
loop.run_forever()
except:
self._write_log('Exception in StreamChannelServer:\n')
self._write_log(''.join(traceback.format_exc()))
async def client_connect(self, reader: StreamReader, writer: StreamWriter):
'''
request from a client for a new connection
this is the asyncio standard connect functionality
        The return of this method is sent to the client
        Note:: In this method the internal client related objects are created, in particular the two tasks
               for reading and sending from/to the client
State: code tested
:param reader: client StreamReader object
:param writer: client StreamWriter object
'''
rx_header = None
address_size = self._address_size
try:
# get the peername
peername = writer.get_extra_info('peername')
self._write_log('client_connect(): Client Request from %s' % str(peername))
# is the server full?
            # we must divide by 2 because we store each client twice in the dict (id and name)!
            if len(self._clients) / 2 >= self._max_clients:
                self._write_log('client_connect(): Client Request denied, too many clients connected')
# negative response server full
await send_msg(writer, b'0')
return
            # the server starts the communication and sends address_size and seed
            # here we add a small amount of security
# for more one must use ssl!
seed = uuid.uuid4().bytes + uuid.uuid4().bytes
raw_key = get_raw_key(seed, self._client_authenticate)
if DEBUG: print('client_connect(): seed+key pair is:', seed, raw_key)
tx_bytes = address_size.to_bytes(1, BO) + seed
if DEBUG: print('client_connect(): First message send is:', tx_bytes)
await send_msg(writer, tx_bytes)
# waiting for clients response with authentication info
rx_bytes = await read_msg(reader)
if DEBUG: print('client_connect(): Package raw bytes received:', rx_bytes)
# decode received answer
rx_frame = Frame(FrameHeader(CT_DECODE, address_size=address_size), rx_bytes)
rx_header = rx_frame.header
if DEBUG: print('client_connect(): Frame received:', repr(rx_frame))
# we already prepare the response header here, will be used in the exceptions too!
tx_header = FrameHeader(container_type=CT_RETURN,
coder_type=CODE_PICKLE,
transaction_id=rx_header.transaction_id,
address_size=address_size)
            if rx_header.container_type != CT_SRV_CMD or rx_header.command_id != 0:
                self._write_log('client_connect(): Client Request denied, wrong connect command sent')
                raise StreamError_Command('client_connect(): Wrong connect command sent')
# authenticate client
args = rx_frame.args
back = self.rpc_authenticate_client(*args, raw_key=raw_key)
if back[0] != 0:
self._write_log('client_connect(): Client Request denied, authentication failed')
raise StreamError_Authenticate('client_connect(): Client Request denied, authentication failed')
# extract other data
client_name = back[1]
rpc_methods_descs = back[2]
if DEBUG: print('client_connect(): Client authentication finished')
# create a new client object
client_id = self._client_id_hdl.get_new_id()
if client_id is None:
                self._write_log('client_connect(): Client Request denied, too many clients connected')
raise StreamError_Authenticate('client_connect(): Client Request denied, no free id found')
control_key = uuid.uuid4().bytes
id_bytes = client_id.to_bytes(address_size, BO)
client = ServeClient(peername, client_id, id_bytes, client_name,
control_key, reader, writer, rpc_methods_descs)
# enter client in the client dict
with self._srv_lock:
                # we add the client twice in the client dict so we can reach it quickly by name or id
self._clients[client_id] = client
self._clients[client_name] = client
client.set_read_task(self._loop.create_task(self._client_task_loop(client)))
# rescale the writer limits:
l = len(self._clients) / 2
self._current_max_writer_size = self._default_limit / l
self._current_slot_delta_time = self._max_slottime / l
if self._current_slot_delta_time < self._min_slottime:
self._current_slot_delta_time = self._min_slottime
# asyncio.gather(client.read_task,client.read_task)
if DEBUG: print('client_connect(): Server related Client object created', client)
# Create the Client default channel
# create positive response
tx_frame = Frame(tx_header, client_id, control_key, self._rpc_methods_descs)
if DEBUG: print('client_connect(): Positive response pkg build', tx_frame)
tx_bytes = tx_frame.encode()
if DEBUG: print('client_connect(): Positive response send', tx_bytes)
# time.sleep(2) # give the loops some time to come up
# give the feedback that client setup is finished
await send_msg(writer, tx_bytes)
            self._write_log('client_connect(): Client connection established; all server internal objects created')
except Exception as e:
# Catch exception and if possible send back to the client
self._write_log('client_connect(): Exception:')
self._write_log(repr(e))
self._write_log(traceback.format_exc())
if rx_header is None:
# give unspecific feedback
try:
await send_msg(writer,
Frame(FrameHeader(CT_EXCEPTION, CODE_MARSHAL),
repr(e),
traceback.format_exc()).encode())
except:
self._write_log(
                        'client_connect(): Unspecific Exception return not sent to client; sub-exception traceback: '
                        '%s' % ''.join(traceback.format_exc()))
else:
# give specific feedback
try:
await send_msg(writer,
Frame(FrameHeader(CT_EXCEPTION, CODE_MARSHAL,
transaction_id=rx_header.transaction_id,
command_id=rx_header.command_id,
channel_id=rx_header.channel_id,
client_id=rx_header.client_id,
counter=rx_header.counter),
repr(e), ''.join(traceback.format_exc())).encode())
except:
                    self._write_log('client_connect(): Exception return not sent to client; sub-exception traceback: '
                                    '%s' % ''.join(traceback.format_exc()))
async def _send_encoded(self, writer, tx_data):
'''
        helper method to encode the data before sending;
        the encoding depends on the type of tx_data given
        :param writer: writer
        :param tx_data: tx_data can be bytes, a (header, payload) tuple, or a Frame() object
:return:
'''
if type(tx_data) is tuple: # we have header object and payload!
await send_msg(writer, tx_data[0].encode() + tx_data[1])
elif type(tx_data) is bytes: # we have raw bytes
await send_msg(writer, tx_data)
else: # we have a tx_frame
await send_msg(writer, tx_data.encode())
async def _client_task_loop(self, client):
# This is the client reader loop
# prepare some locals for quicker access
reader, writer = client.get_async_stream_objects()
address_size = self._address_size
id_bytes = client.id_bytes
if client._alive:
self._write_log('Client %s read from client loop/task started' % client.name)
none_cnt = 0
loop_cnt = 0
first_id_slice = slice(2, self._address_size + 2)
second_id_slice = slice(2 + self._address_size, 2 * self._address_size + 2)
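        # (inferred from the byte accesses below: the two slices pick the first and
        # second address-size wide id fields that follow the two leading type/flag bytes)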
while client.is_alive:
# loop_cnt+=1
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
rx_header = None
try:
rx_bytes = False
try:
rx_bytes = await asyncio.wait_for(read_msg(reader),
10) # timeout is used just to clean the client if it no longer exists
except:
rx_bytes = False
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
# print('RX',rx_bytes)
if rx_bytes is None:
                    # in case of None the client might have died, but we give the loop some more tries
none_cnt += 1
if none_cnt > 10:
# client died!
client.stop()
                        self._write_log('Client %s/%s connection error, connection will be killed by the server' % (
                            str(client.name), str(client.id)))
break
continue
elif rx_bytes is not False:
if DEBUG: print('Frame received:', rx_bytes)
# we got a valid frame we reset the none counter:
none_cnt = 0
# Take data from clients and put it in the write queues
# extract the container type
container_type = rx_bytes[0] & 0b1111
                    # depending on the container type we react
if container_type == CT_DATA or container_type == CT_DATA_END:
chl_id = int.from_bytes(rx_bytes[first_id_slice], BO)
# channel related data we put into the channel send queue
chl = self._channels.get(chl_id)
if chl is None:
raise StreamError_Channel('Given channel_id %s is unknown or not writeable' % (chl_id))
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
try:
chl.put_nowait(container_type, rx_bytes)
except:
print(''.join(traceback.format_exc()))
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
if DEBUG: print('Data frame received and put in channel %i:' % chl_id, rx_bytes)
elif container_type == CT_RETURN or container_type == CT_CMD:
# here we route from one client to another client
if rx_bytes[1] & 0b10 == 0:
# no client given -> server request response
# we ignore
continue
target_client_id = int.from_bytes(rx_bytes[first_id_slice], BO)
if target_client_id == 0:
                            # this is a return to a server call; we ignore it
                            # e.g. from channel activation calls
continue
if target_client_id is None or target_client_id == client.id:
# no client_id or this client_id is given we just send to the client
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
await send_msg(writer, rx_bytes)
else:
# the return is target to another client
                            # we exchange the client_id with this client to mark the source and send it to the target
target_client = self._clients.get(target_client_id)
if target_client is not None:
# in following step client_id is changed to source
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
# exchange target client id with source client id:
new_bytes = b''.join([rx_bytes[:first_id_slice.start],
client.id_bytes,
rx_bytes[first_id_slice.stop:]
])
await send_msg(target_client._async_writer, new_bytes)
else:
raise StreamError_Target(
'Unknown target (%s) given, return cannot be send' % (repr(target_client_id)))
elif container_type == CT_SRV_CMD:
t = threading.Thread(target=self._execute_rpc_threaded, args=(client, rx_bytes))
t.start()
continue
rx_header = FrameHeader(CT_DECODE, rx_bytes, address_size=address_size)
rx_payload = rx_bytes[len(rx_header):]
# server rpc calls are executed here
try:
srv_cmd = self._rpc_methods[rx_header.command_id]
except IndexError:
srv_cmd = None
if srv_cmd is None or srv_cmd == self.rpc_authenticate_client:
# the authentication command (cmd_id==0) is protected and is used only in the client_connect()
raise StreamError_Command('StreamServer - RPC no/unknown command given')
# prepare the command arguments
rx_frame = Frame(rx_header)
rx_frame.decode_payload(rx_payload)
# we replace the client argument with the client object
if len(rx_frame.args) > 0:
args = [client] + list(rx_frame.args[1:])
else:
args = [client]
kwargs = rx_frame.kwargs
if 'client_id' in kwargs:
del kwargs['client_id']
# execute the command
back = srv_cmd(*args, **kwargs)
# prepare the return frame
tx_header = FrameHeader(container_type=CT_RETURN,
coder_type=CODE_MARSHAL,
transaction_id=rx_header.transaction_id,
address_size=address_size)
tx_frame = Frame(tx_header, back)
await asyncio.sleep(0.001) # this allows other threads to be executed in parallel
# put it in the write queue
await send_msg(writer, tx_frame.encode())
if DEBUG: print('tx_frame send to client:', tx_frame)
except Exception as e:
e_repr = repr(e)
tb = ''.join(traceback.format_exc())
# Catch read exceptions and give feedback
self._write_log('Client RX-Loop: Exception:')
self._write_log(e_repr)
self._write_log(repr(tb))
# exceptions are send directly not via writer loop to the client
if rx_header is None:
# no header received we give unclear feedback
try:
await send_msg(writer,
Frame(FrameHeader(CT_EXCEPTION, CODE_MARSHAL), repr(e), repr(tb)).encode())
except:
                        self._write_log('LOOP read_from_client(): Unspecific Exception not sent to client; '
                                        'sub-exception traceback: %s' % ''.join(traceback.format_exc()))
else:
# header received we give specific feedback
try:
await send_msg(writer,
Frame(FrameHeader(CT_EXCEPTION, CODE_MARSHAL,
transaction_id=rx_header.transaction_id,
command_id=rx_header.command_id,
channel_id=rx_header.channel_id,
client_id=rx_header.client_id,
counter=rx_header.counter),
repr(e), tb).encode())
except:
                        self._write_log('LOOP Client: %s: Exception return not sent to client; '
                                        'sub-exception traceback: %s' % (client.name, ''.join(traceback.format_exc())))
self._write_log('Client %s read from client loop/task stopped' % client.name)
        # if this loop is stopped the client has stopped or died
# we delete the client
client_id = client.id
self._write_log('Client %i died' % client_id)
try:
self.rpc_delete_client(client, client.control_key)
except:
            print('Client delete failed:', traceback.format_exc())
def _get_writer_bufferlen(self, writer):
"""
helper delivering the StreamWriter buffer length
Used for load statistics
:param writer: StreamWriter object to get the length from
:return:
"""
return len(writer.transport._buffer)
def __get_new_transaction(self, tto=None):
'''
a helper function to create a new transaction object
:param tto: give the transaction_timeout for this transaction
:return:
'''
if tto is None:
tto = TTO_DEFAULT
return Transaction(self._loop, self._server_transactions, self._server_transaction_id_hdl, self._address_size, tto,
TTO_DEFAULT)
def _set_channel_slot_times(self):
"""
        Helper that sets the channel slot times explicitly
:return:
"""
l = len(self._channels) / 2
if l == 0:
return
current_slot_delta_time = self._max_slottime / l
if current_slot_delta_time < self._min_slottime:
current_slot_delta_time = self._min_slottime
for k, c in self._channels.items():
if type(k) is not int:
continue
c.set_slot_time(current_slot_delta_time)
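        # Worked example of the rule above (illustrative numbers): with
        # max_slottime=30 and min_slottime=2, 5 channels (stored twice, so
        # len(self._channels) == 10) give 30 / 5 = 6 s each, while 20 channels
        # would give 1.5 s, clamped to the 2 s minimum.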
def _execute_rpc_threaded(self, client, rx_bytes):
"""
        rpc commands on the server are executed in an extra thread so that we do not block the client read loops
        waiting for the commands to return
:param client: client
:param rx_bytes: bytes that contains the received command content
:return:
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
rx_header = FrameHeader(CT_DECODE, rx_bytes, address_size=self._address_size)
rx_payload = rx_bytes[len(rx_header):]
# server rpc calls are executed here
try:
srv_cmd = self._rpc_methods[rx_header.command_id]
except IndexError:
srv_cmd = None
if srv_cmd is None or srv_cmd == self.rpc_authenticate_client:
# the authentication command (cmd_id==0) is protected and is used only in the client_connect()
raise StreamError_Command('StreamServer - RPC no/unknown command given')
# prepare the command arguments
rx_frame = Frame(rx_header)
rx_frame.decode_payload(rx_payload)
# we replace the client argument with the client object
args = rx_frame.args
try:
l = len(args)
except:
l = 0
if l > 0 and (type(args) not in {str, bytes}):
args = [client] + list(args[1:])
else:
args = [client]
kwargs = rx_frame.kwargs
if 'client_id' in kwargs:
del kwargs['client_id']
# execute the command
back = srv_cmd(*args, **kwargs)
# prepare the return frame
tx_header = FrameHeader(container_type=CT_RETURN,
coder_type=CODE_PICKLE,
transaction_id=rx_header.transaction_id,
address_size=self._address_size)
# if (hasattr(back, '__iter__')) and (type(back) not in {str,bytes}):
# tx_frame = Frame(tx_header, *back)
# else:
tx_frame = Frame(tx_header, back)
# put it in the write queue
loop.run_until_complete(send_msg(client._async_writer, tx_frame.encode()))
if DEBUG: print('tx_frame send to client:', tx_frame)
def close_server(self):
# Close the server
if self._server is not None:
self._server.close()
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(self._server.wait_closed())
loop.close()
except:
pass
# RPC (Remote Procedure Calls)
# rpc methods (exchange via channel 0)
    # the first argument of server rpc commands must always be the client_id; this argument is replaced
    # in the call with the client object so that it is available during the execution
def rpc_authenticate_client(self, authentication_key, client_name, rpc_methods_descs, raw_key=None):
'''
        This command is an exception and cannot be called from the client explicitly;
        it is just used in the authentication process during client_connect()
Note:: Direct usage will lead into an exception!
:param authentication_key:
:param client_name:
:param rpc_methods_descs:
:param raw_key:
:return:
'''
if authentication_key != raw_key:
return 'Wrong authentication_key given'
if DEBUG: print('Client authentication successful')
return 0, client_name, rpc_methods_descs
    def rpc_echo(self, client_id, *args, **kwargs):
"""
With this method one can test the rpc connection
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param args: arguments list to be echoed into the return
:param kwargs: keyword arguments dict to be echoed into the return
        :return: args, kwargs
        """
        return args, kwargs
def rpc_create_new_channel(self, client_id, chl_name, public=True, lifetime=None, single_in=True,
fixed_slot_time=None):
"""
This command creates a new channel
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_name: New channels name
:param public: Is the channel public or not?
* True - is public channel will be shown in channel list
* False - channel is not shown in channel list and can only be reached if the name or
ID is known by the client
        :param lifetime: We can create temporary channels too; they will automatically be deleted after lifetime (in seconds)
:param single_in: Can the channel have other writers?
* True - only owner can write
                          * False - other clients can subscribe for writing
:param fixed_slot_time: If the parameter is set a fixed slot time in seconds is set.
A part of load balancing is switched off in this case.
                                Special channels might be prioritized higher by this parameter
:return: channel id, channel control key
"""
client = client_id # the client object is here already in (see read loop)
self._write_log('New channel %s request from client %i/%s' % (chl_name, client.id, client.name))
# the given channel name must be stored as a string (what ever we have received)
chl_name = str(chl_name)
if chl_name in self._channels or chl_name == self._statistic_channel_name:
self._write_log('New channel %s request from client %i/%s - denied; channel exists already' % (
chl_name, client.id, client.name))
raise StreamError_Channel('Channel name=%s already in use' % repr(chl_name))
chl_id = self._channel_id_hdl.get_new_id()
if chl_id is None:
self._write_log('New channel %s request from client %i/%s - denied; no free channel' % (
chl_name, client.id, client.name))
raise StreamError_Channel('No free channel')
new_chl = ServeChannel(chl_id, chl_name, client.id, public, single_in, lifetime,
self._loop, self._address_size, fixed_slot_time)
if DEBUG: print('New channel object created', new_chl)
# we fill the channel dict with both id and name (channel can be accessed via name or id)
self._channels[chl_id] = new_chl
self._channels[chl_name] = new_chl
self._set_channel_slot_times()
client.register_owned_chl(new_chl)
new_chl.add_client_writer(client)
self._write_log('Channel %s creation finished' % chl_name)
if DEBUG: print('Channel data returned:', chl_id, new_chl.control_key)
        if lifetime is not None:
            # start a timer to delete the channel after its lifetime:
            threading.Timer(lifetime, self.rpc_delete_channel,
                            args=('SERVER', new_chl.id, new_chl._control_key)).start()
return chl_id, new_chl.control_key
def rpc_subscribe_read_from_channel(self, client_id, chl_id_or_name, buffer_size=100,
full_behavior=RING_BUFFER_FULL, fill_rate=1):
"""
rpc method to subscribe a read on a channel
Based on the given parameters a read buffer is created to read the data
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_id_or_name: channel identification id or name
:param buffer_size: size of the read buffer
:param full_behavior: Type of full behavior see Buffer object for details
        :param fill_rate: Integer that defines how often we sample:
                          * if fill rate is 1 every channel item is put in the read buffer
                          * if fill rate is 2 every second item is put in the read buffer
                          * if fill rate is n every n-th item is put in the read buffer
        :return: channel name, channel id
"""
client = client_id # the client object is here already in (see read loop)
self._write_log('Channel read subscription request from client %i/%s' % (client.id, client.name))
try:
chl = self._channels[chl_id_or_name]
except KeyError:
raise StreamError_Channel('Given channel name=%s is unknown' % repr(chl_id_or_name))
new_read_buffer = Buffer(buffer_size, full_behavior, fill_rate=fill_rate,
chl_id=chl.id, chl_name=chl.name, client=client)
chl.add_client_reader(new_read_buffer)
client._read_bufs.add(new_read_buffer)
self._write_log('Channel %s(%s) read subscription finished' % (chl.name, chl.id))
return chl.name, chl.id
def rpc_unsubscribe_read_from_channel(self, client_id, chl_id_or_name):
"""
Delete the subscription of a read buffer for a client on the channel
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_id_or_name: channel identification id or name
"""
client = client_id # the client object is here already in (see read loop)
self._write_log('Channel read subscription delete request from client %i/%s' % (client.id, client.name))
try:
chl = self._channels[chl_id_or_name]
except KeyError:
raise StreamError_Channel('Given channel name=%s is unknown' % repr(chl_id_or_name))
        rb = chl.remove_client_reader(client.id)
        client._read_bufs.remove(rb)
        del rb
self._write_log('Channel read subscription deleted')
return True
def rpc_subscribe_write_to_channel(self, client_id, chl_id_or_name):
"""
subscribe on a channel as additional writer
Only for channels which are configured as multi input (no single_in)
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_id_or_name: channel identification id or name
:return: channel_name,channel_id
"""
client = client_id # the client object is here already in (see read loop)
self._write_log('Channel write subscription request from client %i/%s' % (client.id, client.name))
try:
chl = self._channels[chl_id_or_name]
except KeyError:
raise StreamError_Channel('Given channel name=%s is unknown' % repr(chl_id_or_name))
if chl.is_single_in:
raise StreamError_Channel('Given channel name=%s; rejected other writers' % repr(chl_id_or_name))
client._write_chls[chl.id] = chl
client._write_chls[chl.name] = chl
chl.add_client_writer(client)
self._write_log('Channel write subscription finished')
return chl.name, chl.id
def rpc_unsubscribe_write_to_channel(self, client_id, chl_id_or_name):
"""
        Delete the subscription on a channel as additional writer
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_id_or_name: channel identification id or name
:return: True
"""
client = client_id # the client object is here already in (see read loop)
        self._write_log('Channel write unsubscription request from client %i/%s' % (client.id, client.name))
try:
chl = self._channels[chl_id_or_name]
except KeyError:
raise StreamError_Channel('Given channel name=%s is unknown' % repr(chl_id_or_name))
if chl.is_single_in:
raise StreamError_Channel('Given channel name=%s; rejected other writers' % repr(chl_id_or_name))
        if chl.id in client._write_chls:
            del client._write_chls[chl.id]
        if chl.name in client._write_chls:
            del client._write_chls[chl.name]
chl.remove_client_writer(client.id)
return True
def rpc_delete_channel(self, client_id, chl_id_or_name, channel_control_key):
"""
rpc method to delete a channel
can only be called by the owner
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_id_or_name: channel identification id or name
:param channel_control_key: key used for authenticate the deletion
:return:
"""
client = client_id # the client object is here already in (see read loop)
        self._write_log('Delete channel %s request received' % (chl_id_or_name))
chl = self._channels.get(chl_id_or_name)
if chl is None:
raise StreamError_Channel('Unknown channel %s' % repr(chl_id_or_name))
chl.stop_channel() # stop background thread
chl_id = chl.id
chl_name = chl.name
        if client == 'SERVER':
            # the server itself requested the delete (e.g. lifetime of a temporary channel expired)
            client = self._clients[chl._owner]
if chl_id not in client._owned_chl_ids:
self._write_log('Delete channel %i request denied, client is not owner' % (chl_id))
raise StreamError_Authenticate('Authenticate failed, client not owner of the channel')
if chl.control_key != channel_control_key:
self._write_log('Delete channel %i request denied, wrong channel control key given' % (chl_id))
raise StreamError_Authenticate('Authenticate failed, wrong channel control key given')
# remove all readers in all clients first
for sub_client in self._clients.values():
for buf in chl._read_bufs:
if buf in sub_client._read_bufs:
sub_client._read_bufs.remove(buf)
if chl_id in sub_client._write_chls:
del sub_client._write_chls[chl_id]
del sub_client._write_chls[chl_name]
# remove the channel from all dicts
client._owned_chl_ids.remove(chl_id)
del self._channels[chl_id]
del self._channels[chl_name]
self._channel_id_hdl.free_id(chl_id)
self._set_channel_slot_times()
self._write_log('Channel %i/%s deleted' % (chl_id, chl_name))
return 'Channel deleted'
def rpc_delete_client(self, client_id, client_control_key):
"""
rpc command to delete a client and close the connection
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param client_control_key: key used for authenticate the deletion
:return:
"""
client = client_id # the read loop has already replaced client_id with the client object
self._write_log('Delete client (%i,%s) request received' % (client.id, client.name))
# first we check the control key:
if client.control_key != client_control_key:
raise StreamError_Authenticate('Authenticate failed, client cannot be deleted')
with self._srv_lock:
for chl_id in list(client._owned_chl_ids):
chl = self._channels[chl_id]
self.rpc_delete_channel(client, chl.id, chl.control_key)
# finally remove the two client entries in the clients dict
try:
del self._clients[client.id]
except:
pass
try:
del self._clients[client.name]
except:
pass
self._client_id_hdl.free_id(client.id)
self._write_log('Client (%i,%s) removed' % (client.id, client.name))
return 'Client removed'
def rpc_get_clients(self, client_id):
"""
delivers a list of all connected clients on this server
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:return: list of tuples (client name, client id)
"""
return [(client.name, client.id) for k, client in self._clients.items() if type(k) is int]
def rpc_get_client_id(self, client_id,client_name):
"""
Translates the given client name into the client id
(if an id is given, it is returned unchanged)
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param client_name: client name from the client from which the related id should be delivered
:return:
"""
client = client_id # the read loop has already replaced client_id with the client object
if client_name not in self._clients:
raise StreamError_Target('Given target client (%s) unknown!' % str(client_name))
return self._clients[client_name].id
def rpc_get_channels(self, client_id):
"""
Delivers a list of all public channels that exist on the server
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:return: list of tuples (channel name, channel id)
"""
return [(chl.name, chl.id) for k, chl in self._channels.items() if (type(k) is int) and chl.is_puplic]
def rpc_get_client_info(self, client_id, target_client_name_or_id):
"""
Ask for the client info delivers a dict with client information
id, name and rpc_method description
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param target_client_name_or_id: client identification name or id
:return: dict with 'ID','NAME', 'RPC_METHOD_DESC' keys
"""
client = client_id # the read loop has already replaced client_id with the client object
if target_client_name_or_id not in self._clients:
raise StreamError_Target('Given target client (%s) unknown!' % str(target_client_name_or_id))
target_client= self._clients[target_client_name_or_id]
return_dict={'ID':target_client.id,
'NAME':target_client.name,
'RPC_METHOD_DESC':target_client.get_rpc_method_desc()
}
return return_dict
def rpc_start_data_transfer_on_channel(self, client_id, chl_id_or_name):
"""
rpc method to start a data transfer transaction on a channel
A transaction id is generated that should be used for the following data frames until the end frame is sent
Note: the transaction id is a channel-specific number and cannot be reused on other channels!
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param chl_id_or_name: channel identification name or id
:return: transaction id
"""
client = client_id # the read loop has already replaced client_id with the client object
chl = client._write_chls.get(chl_id_or_name)
if chl is None:
raise StreamError_Channel(
'Channel (%s) not found or client is no writer on this channel' % repr(chl_id_or_name))
t_id = chl.get_new_transaction_id()
return t_id
def rpc_start_statistic_channel(self, client_id, fill_rate=100, fixed_slot_time=None):
"""
start the statistic channel to track the load of the server
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:param fill_rate: How often the readers should put their statistic information into the statistic channel
:param fixed_slot_time: Channel might be prioritized higher (not recommended)
:return: statistic channel name, so that the client can subscribe a reader to it
"""
if self._statistic_chl is None:
self._statistic_chl_fill_rate = fill_rate
new_id = self._channel_id_hdl.get_new_id()
new_chl = ServeChannel(new_id, self._statistic_channel_name, -1, is_public=True, is_single_in=False,
lifetime=None,
loop=self._loop, address_size=self._address_size,
fixed_slot_time=fixed_slot_time
)
if DEBUG: print('New channel object created', new_chl)
# we fill the channel dict with both id and name (channel can be accessed via name or id)
self._channels[new_id] = new_chl
self._channels[self._statistic_channel_name] = new_chl
self._set_channel_slot_times()
self._statistic_chl = new_chl
for chl in self._channels.values():
chl.set_statistic_channel(new_chl, fill_rate)
return self._statistic_channel_name
else:
# channel exists already!
return self._statistic_channel_name
def rpc_get_statistic_channel_state(self,client_id):
"""
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:return:
* True, channel name - channel is active
* False, channel name - channel is inactive
"""
if self._statistic_chl is not None:
return True,self._statistic_channel_name
else:
return False,self._statistic_channel_name
def rpc_stop_statistic_channel(self, client_id):
"""
stop the statistic channel
:param client_id: first parameter on server rpc commands is always the client id
(will be replaced by client object after frame is received)
:return:
"""
if self._statistic_chl is not None:
statistic_chl = self._statistic_chl
statistic_header = FrameHeader(CT_DATA_END, CODE_MARSHAL, channel_id=statistic_chl.id)
statistic_chl.clear_buffers()
statistic_chl.put_nowait(CT_DATA_END, Frame(statistic_header, 'END_DATA').encode())
statistic_chl._task_trigger.set()
# we wait a short moment before deleting the channel
time.sleep(2)
for chl in self._channels.values():
chl.set_statistic_channel(None, 1)
del self._channels[statistic_chl.name]
del self._channels[statistic_chl.id]
self._set_channel_slot_times()
self._statistic_chl = None
self._channel_id_hdl.free_id(statistic_chl.id)
return True
else:
return False
|
manage.py
|
#!/usr/bin/env python3
import multiprocessing
import os
import signal
import sys
import click
from sqlalchemy import create_engine
try:
from enjoliver import configs, gunicorn_conf
from enjoliver.model import Base
except ModuleNotFoundError:
click.echo('please install enjoliver first: cd enjoliver-api && pip install -e .')
sys.exit(255)
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
APP_PATH = os.path.join(PROJECT_PATH, "enjoliver-api")
def _fs_gunicorn_cleaning(ec):
for directory in [ec.prometheus_multiproc_dir, ec.werkzeug_fs_cache_dir]:
click.echo("cleaning %s" % directory)
try:
for item in os.listdir(directory):
os.remove(os.path.join(directory, item))
except FileNotFoundError:
os.makedirs(directory)
def _init_db(ec):
click.echo("initializing db")
engine = create_engine(ec.db_uri)
Base.metadata.create_all(bind=engine)
def _init_journal_dir(ec):
if not os.path.exists(ec.ignition_journal_dir):
os.makedirs(ec.ignition_journal_dir)
@click.group()
def manage():
pass
@manage.command()
def gunicorn():
ec = configs.EnjoliverConfig(importer=__file__)
_init_db(ec)
_init_journal_dir(ec)
cmd = [
"gunicorn",
"--chdir",
APP_PATH,
"enjoliver.api:gunicorn()",
"--worker-class",
ec.gunicorn_worker_type,
"-b",
ec.gunicorn_bind,
"--log-level",
ec.logging_level.lower(),
"-w",
"%s" % ec.gunicorn_workers,
"-c",
gunicorn_conf.__file__
]
if not os.getenv('prometheus_multiproc_dir', None):
os.environ["prometheus_multiproc_dir"] = ec.prometheus_multiproc_dir
_fs_gunicorn_cleaning(ec)
p = multiprocessing.Process(target=lambda: os.execvpe(cmd[0], cmd, os.environ))
def stop(signum, frame):
click.echo("terminating %d" % p.pid)
p.terminate()
click.echo("starting gunicorn: %s" % " ".join(cmd))
p.start()
with open(ec.gunicorn_pid_file, "w") as f:
f.write("%d" % p.pid)
for sig in [signal.SIGINT, signal.SIGTERM]:
signal.signal(sig, stop)
p.join()
_fs_gunicorn_cleaning(ec)
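# Lifecycle sketch of the command above: the gunicorn master is exec'd in a
# child process, SIGINT/SIGTERM received by this wrapper terminate that child,
# and the prometheus/werkzeug scratch directories are wiped on start and exit.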
@manage.command()
def matchbox():
ec = configs.EnjoliverConfig(importer=__file__)
cmd = [
"%s/runtime/matchbox/matchbox" % PROJECT_PATH,
"-address",
ec.matchbox_uri.replace("https://", "").replace("http://", ""),
"-assets-path",
"%s" % ec.matchbox_assets,
"-data-path",
"%s" % ec.matchbox_path,
"-log-level",
ec.matchbox_logging_level.lower(),
]
click.echo("exec[%s] -> %s\n" % (os.getpid(), " ".join(cmd)))
with open(ec.matchbox_pid_file, "w") as f:
f.write("%d" % os.getpid())
os.execve(cmd[0], cmd, os.environ)
@manage.command()
def plan():
ec = configs.EnjoliverConfig(importer=__file__)
cmd = [
'python',
"%s/enjoliver/k8s_2t.py" % APP_PATH,
]
click.echo("exec[%s] -> %s\n" % (os.getpid(), " ".join(cmd)))
with open(ec.plan_pid_file, "w") as f:
f.write("%d" % os.getpid())
os.execvpe(cmd[0], cmd, os.environ)
@manage.command()
def validate():
cmd = [
'python',
"%s/validate.py" % PROJECT_PATH,
]
click.echo("exec[%s] -> %s\n" % (os.getpid(), " ".join(cmd)))
os.execvpe(cmd[0], cmd, os.environ)
@manage.command('show-configs')
def show_configs():
ec = configs.EnjoliverConfig(importer=__file__)
for k, v in ec.__dict__.items():
click.echo("%s=%s" % (k, v))
if __name__ == '__main__':
manage(obj={})
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from . import context
from . import pool
from . import process
from . import reduction
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
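# Wire format shared by dispatch() and Server.serve_client(): the caller sends
# a 4-tuple (id, methodname, args, kwds) and receives a (kind, result) pair,
# where kind is '#RETURN', '#PROXY', '#ERROR', '#TRACEBACK' or '#UNSERIALIZABLE'.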
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
with self.mutex:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
self.id_to_refcount[ident] += 1
def decref(self, c, ident):
with self.mutex:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
assert self._state.value == State.STARTED
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
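# Minimal usage sketch (Counter is a hypothetical user class):
#   class MyManager(BaseManager): pass
#   MyManager.register('Counter', Counter)
#   with MyManager() as m:
#       c = m.Counter()   # proxy for a Counter living in the server process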
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if context.get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
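# Each name in `exposed` becomes a generated method that forwards to
# self._callmethod(name, args, kwds); the resulting class is cached per
# (name, exposed) pair so repeated calls return the same type.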
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
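# Minimal usage sketch:
#   m = SyncManager()
#   m.start()
#   d = m.dict(); l = m.list()   # DictProxy / ListProxy to server-side objects
#   m.shutdown()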
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
|
control.py
|
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
# The control module
# Creates and runs control loops at levels: Experiment, Trial, Session
from copy import deepcopy
import pydash as ps
import torch.multiprocessing as mp
from convlab import agent as agent_module
from convlab.agent import Body
from convlab.agent.net import net_util
from convlab.env import make_env
from convlab.experiment import analysis, search
from convlab.lib import logger, util
from convlab.spec import spec_util
def make_agent_env(spec, global_nets=None):
'''Helper to create agent and env given spec'''
env = make_env(spec)
body = Body(env, spec['agent'])
AgentClass = getattr(agent_module, ps.get(spec['agent'][0], 'name'))
agent = AgentClass(spec, body=body, global_nets=global_nets)
return agent, env
def mp_run_session(spec, global_nets, mp_dict):
'''Wrap for multiprocessing with shared variable'''
session = Session(spec, global_nets)
metrics = session.run()
mp_dict[session.index] = metrics
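# Each worker process stores its session metrics in the shared mp_dict keyed by
# session index; Trial.parallelize_sessions() joins the workers and collects the
# values back in index order.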
class Session:
'''
The base lab unit to run a RL session for a spec.
Given a spec, it creates the agent and env, runs the RL loop,
then gather data and analyze it to produce session data.
'''
def __init__(self, spec, global_nets=None):
self.spec = spec
self.index = self.spec['meta']['session']
util.set_random_seed(self.spec)
util.set_cuda_id(self.spec)
util.set_logger(self.spec, logger, 'session')
spec_util.save(spec, unit='session')
self.agent, self.env = make_agent_env(self.spec, global_nets)
with util.ctx_lab_mode('eval'): # env for eval
self.eval_env = make_env(self.spec)
self.agent.body.eval_env = self.eval_env
self.num_eval = ps.get(self.agent.spec, 'meta.num_eval')
self.warmup_epi = ps.get(self.agent.agent_spec, 'algorithm.warmup_epi') or -1
logger.info(util.self_desc(self))
def to_ckpt(self, env, mode='eval'):
'''Check with clock whether to run log/eval ckpt: at the start, save_freq, and the end'''
if mode == 'eval' and util.in_eval_lab_modes(): # avoid double-eval: eval-ckpt in eval mode
return False
clock = env.clock
frame = clock.get()
frequency = env.eval_frequency if mode == 'eval' else env.log_frequency
if frame == 0 or clock.get('opt_step') == 0: # avoid ckpt at init
to_ckpt = False
elif frequency is None: # default episodic
to_ckpt = env.done
else: # normal ckpt condition by mod remainder (general for venv)
to_ckpt = util.frame_mod(frame, frequency, env.num_envs) or frame == clock.max_frame
return to_ckpt
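# Illustration (assuming util.frame_mod checks the frequency modulo across a
# vectorized step): with eval_frequency=1000 and num_envs=4, frames advance in
# steps of 4 and the eval ckpt fires on the tick that crosses each multiple of
# 1000, plus once at clock.max_frame.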
def try_ckpt(self, agent, env):
'''Check then run checkpoint log/eval'''
body = agent.body
if self.to_ckpt(env, 'log') and self.env.clock.get('epi') > self.warmup_epi:
body.train_ckpt()
body.log_summary('train')
if self.to_ckpt(env, 'eval'):
avg_return, avg_len, avg_success, avg_p, avg_r, avg_f1, avg_book_rate = analysis.gen_avg_result(agent, self.eval_env, self.num_eval)
body.eval_ckpt(self.eval_env, avg_return, avg_len, avg_success)
body.log_summary('eval')
if body.eval_reward_ma >= body.best_reward_ma:
body.best_reward_ma = body.eval_reward_ma
agent.save(ckpt='best')
if self.env.clock.get('epi') > self.warmup_epi:
if len(body.train_df) > 1: # need > 1 row to calculate stability
metrics = analysis.analyze_session(self.spec, body.train_df, 'train')
if len(body.eval_df) > 1: # need > 1 row to calculate stability
metrics = analysis.analyze_session(self.spec, body.eval_df, 'eval')
def run_eval(self):
avg_return, avg_len, avg_success, avg_p, avg_r, avg_f1, avg_book_rate = analysis.gen_avg_result(self.agent, self.eval_env, self.num_eval)
result = f'{self.num_eval} episodes, {avg_return:.2f} return'
if avg_success is not None:
result += f', {avg_success*100:.2f}% success rate'
if avg_len:
result += f', {avg_len:.2f} turns'
if avg_p:
result += f', {avg_p:.2f} P, {avg_r:.2f} R, {avg_f1:.2f} F1'
if avg_book_rate:
result += f', {avg_book_rate*100:.2f}% book rate'
logger.info(result)
def run_rl(self):
'''Run the main RL loop until clock.max_frame'''
logger.info(f'Running RL loop for trial {self.spec["meta"]["trial"]} session {self.index}')
clock = self.env.clock
obs = self.env.reset()
clock.tick('t')
self.agent.reset(obs)
done = False
while True:
if util.epi_done(done): # before starting another episode
logger.nl('A dialog session is done')
self.try_ckpt(self.agent, self.env)
if clock.get() < clock.max_frame: # reset and continue
clock.tick('epi')
obs = self.env.reset()
self.agent.reset(obs)
done = False
self.try_ckpt(self.agent, self.env)
if clock.get() >= clock.max_frame: # finish
break
clock.tick('t')
action = self.agent.act(obs)
next_obs, reward, done, info = self.env.step(action)
self.agent.update(obs, action, reward, next_obs, done)
obs = next_obs
def close(self):
'''Close session and clean up. Save agent, close env.'''
self.agent.close()
self.env.close()
self.eval_env.close()
logger.info(f'Session {self.index} done')
def run(self):
if util.in_eval_lab_modes():
self.run_eval()
metrics = None
else:
self.run_rl()
metrics = analysis.analyze_session(self.spec, self.agent.body.eval_df, 'eval')
self.close()
return metrics
class Trial:
'''
The lab unit which runs repeated sessions for the same spec, i.e. a trial
Given a spec and number s, trial creates and runs s sessions,
then gathers session data and analyze it to produce trial data.
'''
def __init__(self, spec):
self.spec = spec
self.index = self.spec['meta']['trial']
util.set_logger(self.spec, logger, 'trial')
spec_util.save(spec, unit='trial')
def parallelize_sessions(self, global_nets=None):
mp_dict = mp.Manager().dict()
# spec_util.tick(self.spec, 'session')
# mp_run_session(deepcopy(self.spec), global_nets, mp_dict)
workers = []
for _s in range(self.spec['meta']['max_session']):
spec_util.tick(self.spec, 'session')
w = mp.Process(target=mp_run_session, args=(deepcopy(self.spec), global_nets, mp_dict))
w.start()
workers.append(w)
for w in workers:
w.join()
session_metrics_list = [mp_dict[idx] for idx in sorted(mp_dict.keys())]
return session_metrics_list
def run_sessions(self):
logger.info('Running sessions')
session_metrics_list = self.parallelize_sessions()
return session_metrics_list
def init_global_nets(self):
session = Session(deepcopy(self.spec))
session.env.close() # safety
global_nets = net_util.init_global_nets(session.agent.algorithm)
return global_nets
def run_distributed_sessions(self):
logger.info('Running distributed sessions')
global_nets = self.init_global_nets()
session_metrics_list = self.parallelize_sessions(global_nets)
return session_metrics_list
def close(self):
logger.info(f'Trial {self.index} done')
def run(self):
if self.spec['meta'].get('distributed') is False:
session_metrics_list = self.run_sessions()
else:
session_metrics_list = self.run_distributed_sessions()
metrics = analysis.analyze_trial(self.spec, session_metrics_list)
self.close()
# return metrics['scalar']
return metrics
class Experiment:
'''
The lab unit to run experiments.
It generates a list of specs to search over, then runs each as a trial with s repeated sessions,
then gathers trial data and analyze it to produce experiment data.
'''
def __init__(self, spec):
self.spec = spec
self.index = self.spec['meta']['experiment']
util.set_logger(self.spec, logger, 'trial')
spec_util.save(spec, unit='experiment')
def close(self):
logger.info('Experiment done')
def run(self):
trial_data_dict = search.run_ray_search(self.spec)
experiment_df = analysis.analyze_experiment(self.spec, trial_data_dict)
self.close()
return experiment_df
|
gui.pyw
|
import re
from prettytable import PrettyTable
from requests.exceptions import ConnectTimeout, ConnectionError
from utils import download
from cover import Cover
from change import Change
import pyperclip
import time
import os
import json
import signal
import threading
import tkinter
import tkinter.font as tf
from tkinter.filedialog import askdirectory
from tkinter import (Menu, Frame, LabelFrame, Message, messagebox,
Text, Entry, Button, ttk, Label, scrolledtext, INSERT, END, BOTH, LEFT, RIGHT, Checkbutton, IntVar, StringVar)
from extractor import (acfun, baidutieba, bilibili, changya, douyin, haokan,
ku6, kuaishou, kugou, kuwo, lizhiFM, lofter, music163,
open163, pearvideo, pipigaoxiao, pipix, qianqian,
qingshipin, qqmusic, quanminkge, qutoutiao, sing5,
sohuTV, ted, tudou, wechat_article_cover, weibo, weishi,
xiaokaxiu, xinpianchang, zhihu_video, zuiyou_voice, tuchong, mgtv, iqiyi, qqtv, letv)
sep = os.sep
funcMap = {
'douyin': douyin,
'5sing': sing5,
'kugou': kugou,
'acfun': acfun,
'bilibili': bilibili,
'pipix': pipix,
'kuwo': kuwo,
'ku6': ku6,
'haokan': haokan,
'music.163': music163,
'y.qq': qqmusic,
'mgtv': mgtv,
'iqiyi': iqiyi,
'v.qq': qqtv,
'le.com': letv
}
if not os.path.exists('config'):
os.makedirs('config')
showinfo = '''
Set the download path in the config.json file.
Paste any text that contains a full URL; links are extracted automatically.
- Douyin video share link
https://v.douyin.com/w3G93b/
- bilibili video / bangumi link
https://www.bilibili.com/video/BV1mp4y1X7sr
- acfun video link
https://www.acfun.cn/v/ac15179107
- 5sing music link
http://5sing.kugou.com/fc/17457264.html#hash=86B0B6CCBE1341065931558886E4C07F&album_id=37236081
- Kugou music link
https://www.kugou.com/song/
- Kuwo music link
http://www.kuwo.cn/play_detail/1850233
- Ku6 video
https://www.lizhi.fm/1244717/2587320075826653702
- Haokan video
https://haokan.baidu.com/v?vid=10046831123777078980&tab=recommend
- NetEase Cloud Music
https://music.163.com/#/song?id=5266159
- QQ Music share link
https://c.y.qq.com/base/fcgi-bin/u?__=eawrb35
To download high-quality bilibili videos, set sessData yourself.
sessData is used to check login and membership state; after logging in to BiliBili
in a browser, press F12 and find the cookie under Application.
'''
'''
'name': url,
'index': 1,
'total': 1,
'item_index': 1,
'item_total': 1,
'title': '',
'done': False
'''
progress = []
waiting = []
'''
Main window
'''
top = tkinter.Tk()
top.title('Downloader')
w = 1000
h = 500
ws = top.winfo_screenwidth()
hs = top.winfo_screenheight()
# compute x, y so the window is centered on screen
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
top.geometry('%dx%d+%d+%d' % (w, h, x, y))  # use the computed y instead of a hard-coded offset
'''
Menu
'''
def runCover():
top.iconify()
Cover()
def runChange():
top.iconify()
Change()
menubar = Menu(top)
menubar.add_command(label="Generate cover", command=runCover)
menubar.add_command(label="Transcode", command=runChange)
top['menu'] = menubar
'''
Layout
'''
frmLeft = LabelFrame(width=300, height=100, text='Status', labelanchor='nw')
frmSelectPath = Frame(width=100, height=100)
frmTop = Frame(width=100, height=100)
frmBottom = LabelFrame(width=500, text='Download list', height=400)
frmLeft.pack(side='left', fill='y', padx='5', pady='5')
frmSelectPath.pack(side='top', fill='x', padx='5', pady='5')
frmTop.pack(side='top', fill='x', padx='5', pady='5')
frmBottom.pack(side='right', fill=BOTH, expand='yes', padx='5', pady='5')
'''
Widgets
'''
clipValue = IntVar()
clipButton = Checkbutton(frmLeft, text="Clipboard",
onvalue=1, offvalue=0, height=1, variable=clipValue)
clipButton.pack(side='top', expand='no', fill=None)
def showinfoEvent():
messagebox.showinfo('Info', showinfo)
inputText = scrolledtext.ScrolledText(frmLeft, bd=0, width=50, bg=None)
inputText.pack(side='left', fill='y', expand='no')
message = Button(frmSelectPath, bd=1, bg=None,
text='Supported sites?', command=showinfoEvent)
message.pack(side='left', fill=None, expand='no')
selectPathLabel = Label(frmSelectPath, text="Save folder:")
selectPathLabel.pack(side="left")
downloadPath = StringVar(top, value='download')
inputSelectPath = Entry(frmSelectPath, bd=0,
textvariable=downloadPath, state='readonly')
inputSelectPath.pack(side="left", fill="x", expand='yes')
def selectPath():
path_ = askdirectory()
if path_ and os.path.exists(path_):
downloadPath.set(path_.replace('/', sep).replace('\\', sep))
writeConfig()
def openPath():
os.system(f'start explorer {downloadPath.get()}')
Button(frmSelectPath, bd=1, bg=None,
text='Open folder', command=openPath).pack(side='right')
Button(frmSelectPath, bd=1, bg=None,
text='Choose folder', command=selectPath).pack(side='right')
inputLabel = Label(frmTop, text="Paste text containing a URL:")
inputLabel.pack(side="left")
inputUrlValue = StringVar()
inputUrl = Entry(frmTop, bd=0, textvariable=inputUrlValue)
inputUrl.pack(side="left", fill="x", expand='yes')
def readUrls(text):
res = []
text = text.replace('\n', '杠嗯杠嗯').replace(' ', '')  # replace newlines with a non-URL sentinel so URLs on adjacent lines do not merge
urls = re.findall(
r"https?://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]\.[-A-Za-z]+[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", text)
if urls:
for idx, url in enumerate(urls):
for key in funcMap:
if key in url:
res.append(url)
break
return res
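# e.g. (assumed behavior) readUrls('watch https://v.douyin.com/w3G93b/ now')
# -> ['https://v.douyin.com/w3G93b/'], kept because 'douyin' is a funcMap key;
# URLs of unsupported hosts are dropped.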
def readUrlCallBack():
'''
Extract URLs from the input box and queue them
'''
urls = readUrls(inputUrlValue.get())
inputUrlValue.set('')
if not urls:
return
for url in urls:
appendWaiting(url)
inputBtn = Button(frmTop, bd=1, text="Parse >>>", command=readUrlCallBack)
inputBtn.pack()
'''
Table
'''
columns = ('Name', 'Size', 'Status', 'Task', 'Progress')
treeview = ttk.Treeview(frmBottom, height=18, background=None, foreground=None,
show="headings", columns=columns) # 表格
for i in columns:
treeview.column(i, width=100, anchor='center')  # configure the column
treeview.heading(i, text=i)  # show the column heading
treeview.pack(side='left', fill='both', expand='yes')
scroll1 = ttk.Scrollbar(frmBottom, orient='vertical', command=treeview.yview)
scroll1.place(relx=0.971, rely=0.028, relwidth=0.024, relheight=0.958)
treeview.configure(yscrollcommand=scroll1.set)
def reloadTreeview():
for child in treeview.get_children(''):
treeview.delete(child)
index = 0
for item in progress:
title = item.get('title')
status = 'Downloading'
total = '{:.2f}MB'.format(item['total'] / 1024 / 1024)
if item['total'] == 0:  # avoid division by zero before the size is known
_progress = '0%'
else:
_progress = '{:.2f}%'.format(
item['index'] / item['total'] * 100)
itemIndex = ''
if item["item_total"] == 0:
itemIndex = '0/0'
else:
itemIndex = f'{item["item_index"]}/{item["item_total"]}'
treeview.insert('', index, values=(title,
total, status, itemIndex, _progress))
index += 1
for item in waiting:
treeview.insert('', index, values=('',
0, 'Waiting', '0/0', '0%'))
index += 1
treeview.after(30, reloadTreeview)
reloadTreeview()
# helper functions
'''
'name': url,
'index': 1,
'total': 1,
'item_index': 1,
'item_total': 1,
'title': '',
'done': False
'''
# add a URL to the waiting queue
clipCaches = []
def appendWaiting(url):
if url in waiting:
return False
waiting.append(url)
appendClip(url)
# move a URL into the active downloads
def appendProgress(url):
index = -1
for idx, item in enumerate(progress):
_name = item.get('name')
if _name == url:
index = idx
if index == -1:
progress.append({
'name': url,
'index': 1,
'total': 1,
'item_index': 1,
'item_total': 1,
'title': '',
'done': False
})
spider(callbackFunc, url)
# textual status output
colorNames = ['err', 'warn', 'default', 'success', 'info']
colors = ['#d40505', '#b9840d', 'black', 'green', 'blue']
ft = tf.Font(family='微软雅黑', size=10)  # Microsoft YaHei; Font accepts many more options
for idx, color in enumerate(colorNames):
inputText.tag_add(color, END)  # declare a tag anchored at END
# configure the tag: foreground color and font for inserted text
inputText.tag_config(color, foreground=colors[idx], font=ft)
texts = []
textsOldLength = 0
def loopText():
global textsOldLength
length = len(texts)
while len(texts) > 100:
texts.pop(0)
if textsOldLength < length:
for text in texts:
color = text.get('color')
txt = text.get('text')
if color in colorNames:
inputText.insert(END, f'\n\n{txt}', color)
else:
inputText.insert(END, f'\n\n{txt}')
inputText.see(END)
textsOldLength = len(texts)
inputText.after(30, loopText)
loopText()
def setStatusText(text, color='default'):
texts.append({
'text': text,
'color': color
})
# configuration handling
configPath = 'config/config.json'
def readConfig():
with open(configPath, 'r', encoding='utf-8') as f:
data = json.loads(f.read() or '{}')
return data
def writeConfig():
config['downloadPath'] = downloadPath.get()
with open(configPath, 'w') as f:
f.write(json.dumps(config))
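# config/config.json holds three keys (created below on first run): 'sessData'
# (BiliBili cookie), 'downloadPath', and 'isClip' ('1' enables the clipboard watcher).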
if not os.path.exists(configPath):
config = {
'sessData': '',
'downloadPath': 'download',
'isClip': '0'
}
with open(configPath, 'w') as f:
f.write(json.dumps(config))
config = readConfig()
sessData = config.get('sessData') or ''
downloadPath.set((config.get('downloadPath') or 'download').replace(
'/', sep).replace('\\', sep))
isClip = config.get('isClip') or '0'
if isClip == '1':
clipButton.select()
def filterName(name):
regexp = re.compile(r'(/|\\|:|\?|\*|\||"|\'|<|>|\$)')
space = re.compile(r'\s{2,}')
return space.sub(" ", regexp.sub("", name))
clipData = ''
clipUrls = []
def writeClip(data):
with open('config/clipsgui.txt', 'w') as f:
f.write('\n'.join(data))
def readClip():
data = ''
if os.path.exists('config/clipsgui.txt'):
with open('config/clipsgui.txt', 'r', encoding='utf-8') as f:
data = f.read() or ''
return readUrls(data)
def appendClip(url):
if url in clipUrls:
return False
clipUrls.append(url)
writeClip(clipUrls)
clipUrls = readClip()
if clipUrls:
for i in clipUrls:
appendWaiting(i)
def removeClip(url):
try:
clipUrls.remove(url)
writeClip(clipUrls)
except BaseException:
pass
def listenerClip():
'''
Poll the clipboard for new URLs
'''
global clipData
while 1:
if clipValue.get() == 1:
clipboard_text = pyperclip.paste() or ''
if clipboard_text != clipData:
clipData = clipboard_text
urls = readUrls(clipData)
if urls:
for url in urls:
if url in clipCaches:
continue
clipCaches.append(url)
appendWaiting(url)
setStatusText(f'\n\nReceived URL from clipboard: {url}\n')
time.sleep(0.1)
def callbackFunc(data):
_type = data.get('type')
if _type == 'progress':
index = data.get('index')
total = data.get('total')
name = data.get('name')
title = data.get('title')
for item in progress:
if item['name'] == name:
if index:
item['index'] = index
if total:
item['total'] = total
if title:
item['title'] = title
if index >= total and item['item_index'] >= item['item_total']:
item['done'] = True
break
if _type == 'data':
name = data.get('name')
for item in progress:
if item['name'] == name:
index = data.get('index')
total = data.get('total')
title = data.get('title')
if title:
item['title'] = title
if index:
item['item_index'] = index
if total:
item['item_total'] = total
if index >= total and item['index'] >= item['total']:
item['done'] = True
break
if _type == 'msg':
msg = data.get('msg')
color = data.get('color')
if msg:
setStatusText(msg, color=color)
def get(func, url=None):
if url is None:
return {"msg": "请输入链接"}
f = None
for key in funcMap:
if key in url:
path = f'{downloadPath.get()}{sep}{key}'
if not os.path.exists(path):
try:
os.makedirs(path)
except BaseException:
pass
if key == 'bilibili':
data = bilibili.get(url, savepath=path,
func=func, sessData=sessData)
return {}
if key == 'mgtv':
data = mgtv.get(url, savepath=path,
func=func)
return {}
if key == 'iqiyi':
data = iqiyi.get(url, savepath=path,
func=func)
return {}
if key == 'v.qq':
data = qqtv.get(url, savepath=path,
func=func)
return {}
if key == 'le.com':
data = letv.get(url, savepath=path,
func=func)
return {}
if key == 'douyin':
data = douyin.get(url, func=func)
data['pathname'] = path
return data
else:
f = funcMap[key]
data = f.get(url)
data['pathname'] = path
return data
return {"msg": "链接无法解析"}
def spider(func, url):
data = get(func, url)
text = data.get("text")
msg = data.get("msg")
if msg or text:
setStatusText(f'{text}:{msg or text}')
func({
'type': 'data',
'name': url,
'index': 1,
'total': 1
})
return
title = data.get("title")
audioName = data.get("audioName")
videoName = data.get("videoName")
imgs = data.get("imgs")
audios = data.get("audios")
videos = data.get("videos")
pathName = data.get('pathname')
m4s = data.get('m4s')
file_name = (audioName or videoName or title or None)
savePath = pathName
downs = [
{'type': 'jpg', 'data': imgs or []},
{'type': 'mp3', 'data': audios or []},
{'type': 'mp4', 'data': videos or []},
{'type': 'm4s', 'data': m4s or []},
]
index = 0
    length = len(imgs or []) + len(audios or []) + len(videos or []) + len(m4s or [])
if length == 0:
func({
'type': 'data',
'name': url,
'index': 1,
'total': 1
})
func({
'type': 'progress',
'name': url,
'index': 1,
'total': 1
})
return
func({
'type': 'data',
'name': url,
'title': file_name,
'index': index,
'total': length
})
for v in downs:
for vv in v['data']:
_url = vv
_headers = None
if not isinstance(vv, str):
if vv['name']:
file_name = filterName(vv['name'])
_url = vv['url']
_headers = vv.get('headers', None)
func({
'type': 'data',
'name': url,
'title': file_name,
'index': index,
'total': length
})
download(_url, file_name=file_name,
file_type=v['type'], save_path=savePath, headers=_headers, func=func, pname=url)
index += 1
func({
'type': 'data',
'name': url,
'index': index,
'total': length
})
def start(url):
appendProgress(url)
def loopList():
while 1:
length = len(progress)-1
while length >= 0:
            if progress[length]['done']:
                setStatusText(
                    f'{progress[length]["title"]}:{progress[length]["name"]} download complete!', color='success')
removeClip(progress[length]['name'])
progress.pop(length)
length -= 1
while len(progress) < 5 and len(waiting) > 0:
if not os.path.exists(downloadPath.get()):
try:
                    os.makedirs(downloadPath.get())
                except BaseException:
                    setStatusText(f'{downloadPath.get()} path does not exist')
break
url = waiting.pop(0)
t = threading.Thread(
target=start, args=(url,))
            t.daemon = True
t.start()
time.sleep(1)
t1 = threading.Thread(target=loopList)
t1.daemon = True
t1.start()
t2 = threading.Thread(target=listenerClip)
t2.daemon = True
t2.start()
def closeWindow():
    ans = messagebox.askyesno(title='Notice', message='Are you sure you want to close?')
    if ans:
        top.destroy()
top.protocol('WM_DELETE_WINDOW', closeWindow)
top.mainloop()
|
command.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import enum
import json
import logging
import os
import re
import resource
import signal
import subprocess
import sys
import threading
from abc import ABC, abstractmethod
from typing import Iterable, List, Optional
from .. import (
find_local_root,
find_log_directory,
find_project_root,
is_capable_terminal,
json_rpc,
log,
readable_directory,
)
from ..analysis_directory import AnalysisDirectory, resolve_analysis_directory
from ..configuration import Configuration
from ..exceptions import EnvironmentException
from ..filesystem import remove_if_exists, translate_path
from ..process import register_non_unique_process
from ..socket_connection import SocketConnection, SocketException
TEXT: str = "text"
JSON: str = "json"
LOG: logging.Logger = logging.getLogger(__name__)
class ClientException(Exception):
pass
class State(enum.IntEnum):
DEAD = 0
RUNNING = 1
class ExitCode(enum.IntEnum):
SUCCESS = 0
FOUND_ERRORS = 1
FAILURE = 2
BUCK_ERROR = 3
# If the process exited due to a signal, this will be the negative signal number.
SIGSEGV = -signal.SIGSEGV
class IncrementalStyle(enum.Enum):
SHALLOW = "shallow"
FINE_GRAINED = "fine_grained"
def __str__(self) -> str:
return self.value
class ProfileOutput(enum.Enum):
TRACE_EVENT: str = "trace_event"
COLD_START_PHASES: str = "cold_start_phases"
INCREMENTAL_UPDATES: str = "incremental_updates"
INDIVIDUAL_TABLE_SIZES: str = "individual_table_sizes"
TOTAL_SHARED_MEMORY_SIZE_OVER_TIME: str = "total_shared_memory_size_over_time"
def __str__(self) -> str:
return self.value
class Result:
def __init__(self, code: int, output: str) -> None:
self.code: int = code
self.output: str = output
def check(self) -> None:
if self.code != ExitCode.SUCCESS:
description = ":\n{}".format(self.output) if self.output else ""
if self.code == ExitCode.SIGSEGV:
description += (
"\nThis is a Pyre bug. Please re-run Pyre with --debug "
"and provide the output to the developers."
)
raise ClientException(
"Client exited with error code {}{}".format(self.code, description)
)
def typeshed_search_path(typeshed_root: str) -> List[str]:
search_path = []
typeshed_subdirectories = ["stdlib", "third_party"]
for typeshed_subdirectory_name in typeshed_subdirectories:
typeshed_subdirectory = os.path.join(typeshed_root, typeshed_subdirectory_name)
if (
not os.path.isdir(typeshed_subdirectory)
or typeshed_subdirectory_name == "tests"
or typeshed_subdirectory_name[0] == "."
):
continue
# Always prefer newer version over older version
version_names = sorted(os.listdir(typeshed_subdirectory), reverse=True)
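        # e.g. a hypothetical listing ["2", "2and3", "3.7"] sorts to
        # ["3.7", "2and3", "2"]; the bare "2" entry is then skipped below.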
for version_name in version_names:
# Anything under 2/ or 2.x is unusable for Pyre
if version_name.startswith("2") and version_name != "2and3":
continue
search_path.append(os.path.join(typeshed_subdirectory, version_name))
return search_path
def _convert_json_response_to_result(response: json_rpc.Response) -> Result:
if response.error:
error_code = ExitCode.FAILURE
else:
error_code = ExitCode.SUCCESS
return Result(output=json.dumps(response.result), code=error_code)
def executable_file(file_path: str) -> str:
if not os.path.isfile(file_path):
raise EnvironmentException("%s is not a valid file" % file_path)
if not os.access(file_path, os.X_OK):
raise EnvironmentException("%s is not an executable file" % file_path)
return file_path
class CommandParser(ABC):
NAME = ""
HIDDEN = False
_exit_code: ExitCode = ExitCode.SUCCESS
def __init__(self, arguments: argparse.Namespace, original_directory: str) -> None:
self._arguments = arguments
self._local_configuration: Optional[str] = arguments.local_configuration
self._version: bool = arguments.version
self._debug: bool = arguments.debug
self._sequential: bool = arguments.sequential
self._strict: bool = arguments.strict
self._additional_checks: List[str] = arguments.additional_check
self._show_error_traces: bool = arguments.show_error_traces
self._output: str = arguments.output
self._verbose: bool = arguments.verbose
self._enable_profiling: bool = arguments.enable_profiling
self._enable_memory_profiling: bool = arguments.enable_memory_profiling
self._noninteractive: bool = arguments.noninteractive
self._hide_parse_errors: bool = arguments.hide_parse_errors
self._logging_sections: str = arguments.logging_sections
self._log_identifier: str = arguments.log_identifier
self._log_directory: str = arguments.log_directory
self._logger: str = arguments.logger
self._formatter: List[str] = arguments.formatter
self._targets: List[str] = arguments.targets
self._build: bool = arguments.build
self._use_buck_builder: bool = arguments.use_buck_builder
self._use_legacy_builder: bool = arguments.use_legacy_builder
self._buck_builder_debug: bool = arguments.buck_builder_debug
self._source_directories: List[str] = arguments.source_directories
self._filter_directory: List[str] = arguments.filter_directory
self._use_global_shared_analysis_directory: bool = arguments.use_global_shared_analysis_directory
self._no_saved_state: bool = arguments.no_saved_state
self._search_path: List[str] = arguments.search_path
self._preserve_pythonpath: bool = arguments.preserve_pythonpath
self._binary: str = arguments.binary
self._buck_builder_binary: Optional[str] = arguments.buck_builder_binary
self._buck_builder_target: Optional[str] = arguments.buck_builder_target
self._exclude: List[str] = arguments.exclude
self._typeshed: str = arguments.typeshed
self._save_initial_state_to: Optional[str] = arguments.save_initial_state_to
self._load_initial_state_from: Optional[str] = arguments.load_initial_state_from
self._changed_files_path: Optional[str] = arguments.changed_files_path
self._saved_state_project: Optional[str] = arguments.saved_state_project
# Derived arguments
self._capable_terminal: bool = is_capable_terminal()
self._original_directory: str = original_directory
self._current_directory: str = find_project_root(self._original_directory)
self._local_configuration = self._local_configuration or find_local_root(
self._original_directory
)
self._log_directory: str = find_log_directory(
self._log_directory, self._current_directory, self._local_configuration
)
logger = self._logger
if logger:
self._logger = translate_path(self._original_directory, logger)
if self._debug or not self._capable_terminal:
self._noninteractive = True
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"-l", "--local-configuration", type=str, help="Use a local configuration"
)
parser.add_argument(
"--version",
action="store_true",
help="Print the client and binary versions of Pyre.",
)
parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--sequential", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--strict", action="store_true", help=argparse.SUPPRESS)
parser.add_argument(
"--additional-check", action="append", help=argparse.SUPPRESS
)
parser.add_argument(
"--show-error-traces",
action="store_true",
help="Display errors trace information",
)
# Logging.
parser.add_argument(
"--output", choices=[TEXT, JSON], default=TEXT, help="How to format output"
)
parser.add_argument(
"--verbose", action="store_true", help="Enable verbose logging"
)
parser.add_argument(
"--enable-profiling", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"--enable-memory-profiling", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"-n",
"--noninteractive",
action="store_true",
help="Disable interactive logging",
)
parser.add_argument(
"--hide-parse-errors",
action="store_true",
help="Hide detailed information about parse errors",
)
parser.add_argument(
"--logging-sections", help=argparse.SUPPRESS # Enable sectional logging.
)
parser.add_argument(
"--log-identifier",
default="",
help=argparse.SUPPRESS, # Add given identifier to logged samples.
)
parser.add_argument(
"--log-directory",
help=argparse.SUPPRESS, # Override default location for logs
)
parser.add_argument(
"--logger", help=argparse.SUPPRESS # Specify custom logging binary.
)
parser.add_argument("--formatter", help=argparse.SUPPRESS)
# Link tree determination.
buck_arguments = parser.add_argument_group("buck")
buck_arguments.add_argument(
"--target", action="append", dest="targets", help="The buck target to check"
)
buck_arguments.add_argument(
"--build",
action="store_true",
help="Freshly build all the necessary artifacts.",
)
buck_arguments.add_argument(
"--use-buck-builder",
action="store_true",
help="Use Pyre's experimental builder for Buck projects.",
)
buck_arguments.add_argument(
"--use-legacy-builder",
action="store_true",
help="Use Pyre's legacy builder for Buck projects.",
)
buck_arguments.add_argument(
"--buck-builder-debug", action="store_true", help=argparse.SUPPRESS
)
buck_arguments.add_argument(
"--buck-mode", type=str, help="Mode to pass to `buck query`"
)
source_directories = parser.add_argument_group("source-directories")
source_directories.add_argument(
"--source-directory",
action="append",
dest="source_directories",
help="The source directory to check",
type=os.path.abspath,
)
source_directories.add_argument(
"--filter-directory", help=argparse.SUPPRESS # override filter directory
)
parser.add_argument(
"--use-global-shared-analysis-directory",
action="store_true",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--no-saved-state",
action="store_true",
help="Don't attempt to load Pyre from a saved state.",
)
# Handling of search path
parser.add_argument(
"--search-path",
action="append",
default=[],
type=readable_directory,
help="Add an additional directory of modules and stubs to include"
" in the type environment",
)
parser.add_argument(
"--preserve-pythonpath",
action="store_true",
default=False,
help="Preserve the value of the PYTHONPATH environment variable and "
"inherit the current python environment's search path",
)
parser.add_argument(
"--binary",
default=None,
type=executable_file,
help="Location of the pyre binary",
)
parser.add_argument(
"--buck-builder-binary",
default=None,
help="Location of the buck builder binary",
)
parser.add_argument(
"--buck-builder-target", default=None, help=argparse.SUPPRESS
)
parser.add_argument(
"--exclude",
action="append",
default=[],
help="Exclude files and directories matching this regexp from parsing",
)
# Typeshed stubs location
parser.add_argument(
"--typeshed",
default=None,
type=readable_directory,
help="Location of the typeshed stubs",
)
parser.add_argument(
"--save-initial-state-to",
default=None,
help="Path to serialize pyre's initial state to.",
)
parser.add_argument(
"--load-initial-state-from", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--changed-files-path", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--saved-state-project", default=None, type=str, help=argparse.SUPPRESS
)
parser.add_argument(
"--features", default=None, type=str, help=argparse.SUPPRESS
)
# Temporary flag to help migrate to json sockets for incremental and query
# commands.
parser.add_argument(
"--use-json-sockets",
action="store_true",
default=False,
help=argparse.SUPPRESS,
)
@classmethod
@abstractmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
pass
@abstractmethod
def _run(self) -> None:
""" Abstract method expected to be overridden by subclasses. """
pass
def run(self) -> "CommandParser":
self._run()
return self
def cleanup(self) -> None:
pass
def exit_code(self) -> ExitCode:
return self._exit_code
@property
def configuration(self) -> Optional[Configuration]:
return None
@property
def current_directory(self) -> Optional[str]:
return self._current_directory
@property
def local_configuration(self) -> Optional[str]:
return self._local_configuration
@property
def log_directory(self) -> str:
return self._log_directory
@property
def noninteractive(self) -> bool:
return self._noninteractive
class Command(CommandParser, ABC):
_buffer: List[str] = []
_call_client_terminated: bool = False
_local_root: str = ""
def __init__(
self,
arguments: argparse.Namespace,
original_directory: str,
configuration: Optional[Configuration] = None,
analysis_directory: Optional[AnalysisDirectory] = None,
) -> None:
super(Command, self).__init__(arguments, original_directory)
local_configuration = self._local_configuration
if local_configuration:
self._local_root = (
local_configuration
if os.path.isdir(local_configuration)
else os.path.dirname(local_configuration)
)
else:
self._local_root = self._original_directory
self._configuration: Configuration = (
configuration or self.generate_configuration()
)
self._strict: bool = arguments.strict or self._configuration.strict
self._logger: str = arguments.logger or (configuration and configuration.logger)
self._ignore_all_errors_paths: Iterable[str] = (
self._configuration.ignore_all_errors
)
self._number_of_workers: int = self._configuration.number_of_workers
self._version_hash: str = self._configuration.version_hash
self._formatter: Optional[str] = self._configuration.formatter
self._taint_models_path: List[str] = [
translate_path(self._original_directory, path)
for path in self._configuration.taint_models_path
]
self._analysis_directory: AnalysisDirectory = (
analysis_directory or self.generate_analysis_directory()
)
self._features: Optional[str] = arguments.features
@classmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
pass
def generate_configuration(self) -> Configuration:
return Configuration(
local_configuration=self._local_configuration,
search_path=self._search_path,
binary=self._binary,
typeshed=self._typeshed,
preserve_pythonpath=self._preserve_pythonpath,
excludes=self._exclude,
logger=self._logger,
formatter=self._formatter,
log_directory=self._log_directory,
)
def generate_analysis_directory(self) -> AnalysisDirectory:
configuration = self._configuration
if not configuration:
return AnalysisDirectory(".")
else:
return resolve_analysis_directory(
self._arguments,
configuration,
self._original_directory,
self._current_directory,
)
def run(self) -> "Command":
configuration = self._configuration
if configuration and configuration.disabled:
LOG.log(log.SUCCESS, "Pyre will not run due to being explicitly disabled")
else:
self._run()
return self
def _run(self) -> None:
pass
def cleanup(self) -> None:
self._analysis_directory.cleanup()
def _flags(self) -> List[str]:
flags = []
if self._debug:
flags.extend(["-debug"])
if self._sequential:
flags.extend(["-sequential"])
if self._strict:
flags.extend(["-strict"])
if self._additional_checks:
flags.append("-additional-checks")
flags.append(",".join(self._additional_checks))
if self._show_error_traces:
flags.append("-show-error-traces")
if self._verbose:
flags.append("-verbose")
if not self._hide_parse_errors:
if self._logging_sections:
self._logging_sections = self._logging_sections + ",parser"
else:
self._logging_sections = "parser"
if not self._capable_terminal:
# Disable progress reporting for non-capable terminals.
# This helps in reducing clutter.
if self._logging_sections:
self._logging_sections = self._logging_sections + ",-progress"
else:
self._logging_sections = "-progress"
if self._logging_sections:
flags.extend(["-logging-sections", self._logging_sections])
if self._enable_profiling:
flags.extend(["-profiling-output", self.profiling_log_path()])
if self._enable_memory_profiling:
flags.extend(["-memory-profiling-output", self.profiling_log_path()])
if self._enable_profiling or self._enable_memory_profiling:
# Clear the profiling log first since in pyre binary it's append-only
remove_if_exists(self.profiling_log_path())
if self._current_directory:
flags.extend(["-project-root", self._current_directory])
if self._log_identifier:
flags.extend(["-log-identifier", self._log_identifier])
if self._logger:
flags.extend(["-logger", self._logger])
if self._log_directory:
flags.extend(["-log-directory", self._log_directory])
return flags
# temporarily always return empty list to unblock client release
def _feature_flags(self) -> List[str]:
features = self._features
if features:
lsp_features = ["click_to_fix", "hover", "go_to_definition"]
filtered = {
key: value
for key, value in json.loads(features).items()
if key in lsp_features
}
return ["-features", json.dumps(filtered)]
return []
def _read_stdout(self, stdout: Iterable[bytes]) -> None:
self._buffer = []
for line in stdout:
self._buffer.append(line.decode())
def _read_stderr(self, stream: Iterable[bytes]) -> None:
buffer = None
log_pattern = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+) (.*)")
try:
for line in stream:
if self._call_client_terminated:
return
line = line.decode().rstrip()
match = log_pattern.match(line)
if match:
if buffer:
buffer.flush()
buffer = log.Buffer(
section=match.groups()[0], data=[match.groups()[1]]
)
elif buffer:
buffer.append(line)
if buffer:
buffer.flush()
except Exception:
pass
def _call_client(self, command: str, capture_output: bool = True) -> Result:
if not os.path.isdir(self._analysis_directory.get_root()):
raise EnvironmentException(
"`{}` is not a link tree.".format(self._analysis_directory.get_root())
)
client_command = [self._configuration.binary, command]
client_command.extend(self._flags())
client_command.append(self._analysis_directory.get_root())
def limit_memory_usage() -> None:
try:
limit = 30 * 1024 * 1024 * 1024 # 30 GB
resource.setrlimit(resource.RLIMIT_DATA, (limit, limit))
except OSError:
# Run the process with unlimited memory if the underlying syscall fails.
pass
LOG.debug("Running `%s`", " ".join(client_command))
with subprocess.Popen(
client_command,
stdout=subprocess.PIPE if capture_output else None,
stderr=subprocess.PIPE,
preexec_fn=limit_memory_usage,
) as process:
# Read stdout output
if capture_output:
stdout_reader = threading.Thread(
target=self._read_stdout, args=(process.stdout,)
)
stdout_reader.daemon = True
stdout_reader.start()
# Read the error output and print it.
self._call_client_terminated = False
stderr_reader = threading.Thread(
target=self._read_stderr, args=(process.stderr,)
)
stderr_reader.daemon = True
stderr_reader.start()
with register_non_unique_process(
process.pid, self.NAME, self.log_directory
):
# Wait for the process to finish and clean up.
process.wait()
# In the exceptional case, make sure that we print the error messages.
if process.returncode != 0:
stderr_reader.join()
self._call_client_terminated = True
if capture_output:
# pyre-fixme: stdout_reader is not always declared!
stdout_reader.join()
output = ""
if capture_output:
output = "".join(self._buffer)
return Result(code=process.returncode, output=output)
def _relative_path(self, path: str) -> str:
return os.path.relpath(path, self._original_directory)
def _state(self) -> State:
pid_path = os.path.join(self._log_directory, "server/server.pid")
try:
with open(pid_path) as file:
pid = int(file.read())
os.kill(pid, 0) # throws if process is not running
return State.RUNNING
except Exception:
return State.DEAD
# will open a socket, send a request, read the response and close the socket.
def _send_and_handle_socket_request(
self, request: json_rpc.Request, version_hash: str
) -> None:
try:
with SocketConnection(self._log_directory) as socket_connection:
socket_connection.perform_handshake(version_hash)
socket_connection.send(request)
stderr_reader = threading.Thread(
target=self._read_stderr, args=(sys.stderr,)
)
stderr_reader.daemon = True
stderr_reader.start()
response = socket_connection.read()
result = _convert_json_response_to_result(response)
result.check()
self._socket_result_handler(result)
except (
SocketException,
ResourceWarning,
ClientException,
json_rpc.JSONRPCException,
) as exception:
LOG.error("Error while waiting for server: %s", str(exception))
LOG.error("Run `pyre restart` in order to restart the server.")
self._exit_code = ExitCode.FAILURE
# Will be overwritten in subclasses to specialize how json socket
# responses are handled.
def _socket_result_handler(self, result: Result) -> None:
log.stdout.write(result.output)
def profiling_log_path(self) -> str:
return os.path.join(self._log_directory, "profiling.log")
@property
def analysis_directory(self) -> AnalysisDirectory:
return self._analysis_directory
@property
def configuration(self) -> Optional[Configuration]:
return self._configuration
|
count people with magnetic sensor.py
|
# Import standard python modules
import time
import sys
import threading
import datetime
# Import Raspberry Hardware
import board
import busio
# Import ADS1115 module
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
# Import RPi.GPIO Module
import RPi.GPIO as GPIO
# Define global vars #
# Thread synchronization lock
lock = threading.RLock()
# Interval (in seconds) between data reports
showDataTime=20
# Define Classes #
# Define class for instance objects in threading
class DataCount():
def __init__(self):
self.countTimes=0
# Define functions #
# Functions run in parallel threads
def show_data(peopleCount):
    """Show data every 20 seconds and reset countTimes."""
    counted=False
    while True:
if(int(time.time())%showDataTime==0):
if(not counted):
lock.acquire()
print("{} | Personas contadas {}".format(
datetime.datetime.now(), peopleCount.countTimes))
peopleCount.countTimes=0
lock.release()
counted=True
else:
counted=False
def main():
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1115(i2c)
# Create single-ended input on channel 0
chan = AnalogIn(ads, ADS.P0)
# Dict with some GPIO pin numbers
pinList={"countState":7, "count":8}
# Setup GPIO setmode
GPIO.setmode(GPIO.BCM)
# Set GPIO pin signal OUT and initial value "shutdown"
GPIO.setup(list(pinList.values()), GPIO.OUT, initial=GPIO.LOW)
# Control of count system
countPeople=DataCount()
    # When the magnetic element is near, the minimum value is read
    sensorValueMin=6900
    # When the magnetic element is far away, the maximum value is read
    sensorValueMax=10300
    """Trigger threshold: the minimum value plus 85% of the min-to-max
    delta, i.e. the point where the element has traveled 85% of its range.
    """
sensorValueMedium=int((sensorValueMax-sensorValueMin)*0.85)+sensorValueMin
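    # With min=6900 and max=10300: delta=3400, 0.85*3400=2890, so the
    # threshold is 6900+2890 = 9790.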
baseTime=time.time()
    countTemp=0 # Running count while the plate is pressed, before it is reported
    countTempLast=0 # For toggling the LED that signals each +1 count
# Flags for execute only one time (turn off, turn on)
counted=False
countState=False
countTempLastState=True
countRate=0.6
# Setup Threading, to show data every 20 seconds
hilo0=threading.Thread(target=show_data, args=[countPeople,])
hilo0.start()
while True:
sensorValue = chan.value # Distance of magnetic sensor
        # Case: pressure plate not pressed, magnetic element far away (near max value)
if(sensorValue>=sensorValueMedium):
baseTime=time.time()
if(countTemp!=0):
lock.acquire()
countPeople.countTimes+=countTemp
lock.release()
countTemp=0
            # Turn off LED that signals the plate in the high position
if(countState):
countState=False
GPIO.output(pinList.get("countState"), GPIO.LOW)
        else: # Case: plate in the low (pressed) position
            """Triggered every showDataTime-1 seconds to update
            countTimes
            """
# print("is",sensorValue, sensorValueMedium)
if(int(time.time())%(showDataTime-1)==0):
# Do only one time per showDataTime-1
if(not counted):
lock.acquire()
countPeople.countTimes+=countTemp
lock.release()
# Update base time with rate residue
baseTime=time.time()-(time.time()%countRate)
# Update countTempLast for LED count alert
continueTime=time.time()-baseTime
countTempLast=int(continueTime/countRate)+1
counted=True
else:
counted=False
continueTime=time.time()-baseTime
# Count rate + 1 more (case 0 to 0.6)
countTemp=int(continueTime/countRate)+1
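            # e.g. with countRate=0.6 s, a plate held for 1.3 s gives
            # int(1.3/0.6)+1 = 3 counts.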
            # Turn on LED that signals the plate in the low position
if(not countState):
countState=True
GPIO.output(pinList.get("countState"), GPIO.HIGH)
# Turn on LED to alert every counted +1 for 1 cycle time
if(countTempLast!=countTemp):
countTempLast=countTemp
countTempLastState=False
GPIO.output(pinList.get("count"), GPIO.HIGH)
elif(not countTempLastState):
countTempLastState=True
GPIO.output(pinList.get("count"), GPIO.LOW)
time.sleep(0.1) # Cycle time
if __name__=="__main__":
try:
main()
except:
print("{} line {}".format(sys.exc_info()[0],
sys.exc_info()[-1].tb_lineno))
GPIO.cleanup()
|
test_aioimport.py
|
import asyncio
import threading
import pytest
import aioimport
@pytest.mark.asyncio
async def test_import_module() -> None:
import tests.helper_test_import_module_event_source as event_source
task = asyncio.create_task(
aioimport.import_module("tests.helper_test_import_module_long_import")
)
await asyncio.wait_for(event_source.start.wait(), timeout=5)
event_source.end.set()
await asyncio.wait_for(task, timeout=event_source.TIMEOUT)
event = threading.Event()
def check_import() -> None:
import tests.helper_test_import_module_long_import
assert tests.helper_test_import_module_long_import.done
event.set()
event_source.start.clear()
event_source.end.clear()
threading.Thread(target=check_import, daemon=True).start()
event.wait(timeout=event_source.TIMEOUT)
@pytest.mark.asyncio
async def test_reload() -> None:
import tests.helper_test_reload_data
data1 = tests.helper_test_reload_data.data
await aioimport.reload(tests.helper_test_reload_data)
assert data1 is not tests.helper_test_reload_data.data
|
DelegateCall.py
|
"""Provides a multiprocessing-safe way to aggregate results from
multiple function calls.
In multi-process programs, it is often useful to delegate a
function call - e.g., writing a log message to a file - to another
process to avoid conflicts. :class:`pyexperiment.utils.DelegateCall`
implements a functor that, when called, passes the argument data to a
function running in a thread of the process that created the
DelegateCall object. The callback itself must be thread-safe though.
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import sys
import multiprocessing
import threading
import traceback
class DelegateCall(object): # pylint: disable=too-few-public-methods
"""Helper class that provides a multiprocessing-safe way to aggregate
results from multiple function calls.
The arguments to the __call__ function are passed through a
multiprocessing.Queue to the process where the class was
initialized (i.e., all arguments must be serializable).
"""
def __init__(self, callback):
"""Initializer, takes a callback that processes the received data in
the original process.
"""
# The callback
self.callback = callback
# The queue that aggregates the data
self._queue = multiprocessing.JoinableQueue(-1)
# The thread that processes it
processor_thread = threading.Thread(target=self._receive)
processor_thread.daemon = True
processor_thread.start()
def __call__(self, *data):
"""Send data, can be called from any process
"""
self._queue.put_nowait(data)
    def join(self):
        """Blocks until all pending callbacks have been processed
        """
        return self._queue.join()
def _receive(self):
"""Loops indefinitely receiving calls from the queue
"""
while True:
try:
data = self._queue.get()
self.callback(*data)
self._queue.task_done()
except EOFError: # pragma: no cover
# This is covered, but not reported
break
# This should really catch every other exception!
except Exception: # pylint: disable=broad-except
traceback.print_exc(file=sys.stderr)
# Should not get stuck here
self._queue.task_done()
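

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module):
# worker processes delegate their "log" calls back to the parent process,
# where the callback runs on DelegateCall's internal thread. The worker
# function and message text below are hypothetical.
# ---------------------------------------------------------------------------
def _example_worker(delegate, worker_id):
    """Hypothetical worker: delegates a message instead of printing itself."""
    delegate('message from worker %d' % worker_id)


if __name__ == '__main__':
    delegate_call = DelegateCall(print)  # callback runs in this process
    processes = [multiprocessing.Process(target=_example_worker,
                                         args=(delegate_call, i))
                 for i in range(2)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
    delegate_call.join()  # block until all queued callbacks have run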
|
slack.py
|
import json
import logging
import random
import re
import requests
import sys
import time
import traceback
from websocket import WebSocketConnectionClosedException
from markdownify import MarkdownConverter
from will import settings
from .base import IOBackend
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import SleepMixin, StorageMixin
from multiprocessing import Process
from will.abstractions import Event, Message, Person, Channel, Attachment
from slackclient import SlackClient
from slackclient.server import SlackConnectionError
SLACK_SEND_URL = "https://slack.com/api/chat.postMessage"
SLACK_SET_TOPIC_URL = "https://slack.com/api/channels.setTopic"
SLACK_PRIVATE_SET_TOPIC_URL = "https://slack.com/api/groups.setTopic"
class SlackMarkdownConverter(MarkdownConverter):
def convert_strong(self, el, text):
return '*%s*' % text if text else ''
def convert_a(self, el, text):
href = el.get('href')
title = el.get('title')
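        # e.g. <a href="https://example.com">docs</a> (hypothetical input)
        # converts to Slack's link syntax: <https://example.com|docs>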
if self.options['autolinks'] and text == href and not title:
# Shortcut syntax
return '<%s>' % href
title_part = ' "%s"' % title.replace('"', r'\"') if title else ''
return '<%s%s|%s>' % (href, title_part, text or '') if href else text or ''
class SlackAttachmentConverter:
""" This takes an Attachment object or list of Attachment objects and renders
a Slack ready Message Attachment JSON payload using the render() method. """
def __init__(self, attachments):
self.attachments = attachments
    def render(self):
        """ Builds a json payload for Slack Rich Format Message Attachments.
        It takes either a single Attachment object or a list of Attachment objects."""
        attachments = self.attachments
        if not isinstance(attachments, (list, tuple)):
            attachments = [attachments]
        return json.dumps([
            {
                "fallback": a.fallback,
                "color": a.color,
                "text": a.text,
                "actions": a.actions,
                "footer": a.footer,
                "footer_icon": a.footer_icon,
            }
            for a in attachments
        ])
class SlackBackend(IOBackend, SleepMixin, StorageMixin):
friendly_name = "Slack"
internal_name = "will.backends.io_adapters.slack"
required_settings = [
{
"name": "SLACK_API_TOKEN",
"obtain_at": """1. Go to https://api.slack.com/custom-integrations/legacy-tokens and sign in as yourself (or a user for Will).
2. Find the workspace you want to use, and click "Create token."
3. Set this token as SLACK_API_TOKEN."""
}
]
def get_channel_from_name(self, name):
for k, c in self.channels.items():
if c.name.lower() == name.lower() or c.id.lower() == name.lower():
return c
# We need to check if a user id was passed as a channel
# and get the correct IM channel if it was.
elif name.startswith('U') or name.startswith('W'):
return self.get_im_channel(name)
def get_im_channel(self, user_id):
logging.info('user_id : {}'.format(user_id))
con = self.client.api_call("conversations.open", users=user_id)
logging.info(con)
if 'channel' in con:
return con['channel']['id']
# return self.client.api_call("im.open", user=user_id)['channel']['id']
def normalize_incoming_event(self, event):
if (
"type" in event
and event["type"] == "message"
and ("subtype" not in event or event["subtype"] != "message_changed")
# Ignore thread summary events (for now.)
# TODO: We should stack these into the history.
and ("subtype" not in event or ("message" in event and "thread_ts" not in event["message"]))
):
# print("slack: normalize_incoming_event - %s" % event)
# Sample of group message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495661121.838366', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'C5JDAR2S3'}
# Sample of 1-1 message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495662397.335424', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'D5HGP0YE7'}
# Threaded message
# {u'event_ts': u'1507601477.000073', u'ts': u'1507601477.000073',
# u'subtype': u'message_replied', u'message':
# {u'thread_ts': u'1507414046.000010', u'text': u'hello!',
# u'ts': u'1507414046.000010', u'unread_count': 2,
# u'reply_count': 2, u'user': u'U5GUL9D9N', u'replies':
# [{u'user': u'U5ACF70RH', u'ts': u'1507601449.000007'}, {
# u'user': u'U5ACF70RH', u'ts': u'1507601477.000063'}],
# u'type': u'message', u'bot_id': u'B5HL9ABFE'},
# u'type': u'message', u'hidden': True, u'channel': u'D5HGP0YE7'}
sender = self.people[event["user"]]
channel = clean_for_pickling(self.channels[event["channel"]])
# print "channel: %s" % channel
interpolated_handle = "<@%s>" % self.me.id
real_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
is_private_chat = False
thread = None
if "thread_ts" in event:
thread = event["thread_ts"]
                # If the parent thread is a 1-1 between Will and me, also treat that as direct.
# Since members[] still comes in on the thread event, we can trust this, even if we're
# in a thread.
if channel.id == channel.name:
is_private_chat = True
# <@U5GUL9D9N> hi
# TODO: if there's a thread with just will and I on it, treat that as direct.
is_direct = False
if is_private_chat or event["text"].startswith(interpolated_handle) or event["text"].startswith(real_handle):
is_direct = True
if event["text"].startswith(interpolated_handle):
event["text"] = event["text"][len(interpolated_handle):].strip()
if event["text"].startswith(real_handle):
event["text"] = event["text"][len(real_handle):].strip()
if interpolated_handle in event["text"] or real_handle in event["text"]:
will_is_mentioned = True
if event["user"] == self.me.id:
will_said_it = True
m = Message(
content=event["text"],
type=event["type"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
thread=thread,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
return m
else:
            # An event type the slack backend has no idea how to handle.
pass
def set_topic(self, event):
headers = {'Accept': 'text/plain'}
data = self.set_data_channel_and_thread(event)
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
"topic": event.content,
})
if data["channel"].startswith("G"):
url = SLACK_PRIVATE_SET_TOPIC_URL
else:
url = SLACK_SET_TOPIC_URL
r = requests.post(
url,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def handle_outgoing_event(self, event):
if event.type in ["say", "reply"]:
if "kwargs" in event and "html" in event.kwargs and event.kwargs["html"]:
event.content = SlackMarkdownConverter().convert(event.content)
event.content = event.content.replace("&", "&")
event.content = event.content.replace(r"\_", "_")
kwargs = {}
if "kwargs" in event:
kwargs.update(**event.kwargs)
if hasattr(event, "source_message") and event.source_message and "channel" not in kwargs:
self.send_message(event)
else:
# Came from webhook/etc
# TODO: finish this.
target_channel = kwargs.get("room", kwargs.get("channel", None))
if target_channel:
event.channel = self.get_channel_from_name(target_channel)
if event.channel:
self.send_message(event)
else:
logging.error(
"I was asked to post to the slack %s channel, but it doesn't exist.",
target_channel
)
if self.default_channel:
event.channel = self.get_channel_from_name(self.default_channel)
event.content = event.content + " (for #%s)" % target_channel
self.send_message(event)
elif self.default_channel:
event.channel = self.get_channel_from_name(self.default_channel)
self.send_message(event)
else:
logging.critical(
"I was asked to post to a slack default channel, but I'm nowhere."
"Please invite me somewhere with '/invite @%s'", self.me.handle
)
if event.type in ["topic_change", ]:
self.set_topic(event)
elif (
event.type == "message.no_response"
and event.data.is_direct
and event.data.will_said_it is False
):
event.content = random.choice(UNSURE_REPLIES)
self.send_message(event)
def handle_request(self, r, data):
resp_json = r.json()
if not resp_json["ok"]:
if resp_json["error"] == "not_in_channel":
channel = self.get_channel_from_name(data["channel"])
if not hasattr(self, "me") or not hasattr(self.me, "handle"):
self.people
logging.critical(
"I was asked to post to the slack %s channel, but I haven't been invited. "
"Please invite me with '/invite @%s'" % (channel.name, self.me.handle)
)
else:
logging.error("Error sending to slack: %s" % resp_json["error"])
logging.error(resp_json)
assert resp_json["ok"]
    def set_data_channel_and_thread(self, event, data=None):
        # Avoid a mutable default argument, which would be shared across calls.
        if data is None:
            data = {}
if "channel" in event:
# We're coming off an explicit set.
try:
channel_id = event.channel.id
# This was a user ID so we will get channel from event.channel
except AttributeError:
channel_id = event.channel
else:
if "source_message" in event:
# Mentions that come back via self.say()
if hasattr(event.source_message, "data"):
channel_id = event.source_message.data.channel.id
if hasattr(event.source_message.data, "thread"):
data.update({
"thread_ts": event.source_message.data.thread
})
else:
# Mentions that come back via self.say() with a specific room (I think)
channel_id = event.source_message.channel.id
if hasattr(event.source_message, "thread"):
data.update({
"thread_ts": event.source_message.thread
})
else:
# Mentions that come back via self.reply()
if hasattr(event.data, "original_incoming_event"):
if hasattr(event.data.original_incoming_event.channel, "id"):
channel_id = event.data.original_incoming_event.channel.id
else:
channel_id = event.data.original_incoming_event.channel
else:
if hasattr(event.data["original_incoming_event"].data.channel, "id"):
channel_id = event.data["original_incoming_event"].data.channel.id
else:
channel_id = event.data["original_incoming_event"].data.channel
try:
# If we're starting a thread
if "kwargs" in event and "start_thread" in event.kwargs and event.kwargs["start_thread"] and ("thread_ts" not in data or not data["thread_ts"]):
if hasattr(event.source_message, "original_incoming_event"):
data.update({
"thread_ts": event.source_message.original_incoming_event["ts"]
})
elif (
hasattr(event.source_message, "data")
and hasattr(event.source_message.data, "original_incoming_event")
and "ts" in event.source_message.data.original_incoming_event
):
logging.error(
"Hm. I was told to start a new thread, but while using .say(), instead of .reply().\n"
"This doesn't really make sense, but I'm going to make the best of it by pretending you "
"used .say() and threading off of your message.\n"
"Please update your plugin to use .reply() when you have a second!"
)
data.update({
"thread_ts": event.source_message.data.original_incoming_event["ts"]
})
else:
if hasattr(event.data.original_incoming_event, "thread_ts"):
data.update({
"thread_ts": event.data.original_incoming_event.thread_ts
})
elif "thread" in event.data.original_incoming_event.data:
data.update({
"thread_ts": event.data.original_incoming_event.data.thread
})
        except Exception:
            logging.info(traceback.format_exc().split(" ")[-1])
data.update({
"channel": channel_id,
})
return data
def send_message(self, event):
if event.content == '' or event.content is None:
# slack errors with no_text if empty message
return
data = {}
if hasattr(event, "kwargs"):
data.update(event.kwargs)
# Add slack-specific functionality
if "color" in event.kwargs:
data.update({
"attachments": json.dumps([
{
"fallback": event.content,
"color": self._map_color(event.kwargs["color"]),
"text": event.content,
}
]),
})
elif "attachments" in event.kwargs:
if isinstance(event.kwargs["attachments"], Attachment):
data.update({
"text": event.content,
"attachments": SlackAttachmentConverter(event.kwargs["attachments"]).render()
})
else:
data.update({
"text": event.content,
"attachments": json.dumps(event.kwargs["attachments"])
})
else:
data.update({
"text": event.content,
})
else:
data.update({
"text": event.content,
})
data = self.set_data_channel_and_thread(event, data=data)
# Auto-link mention names
if "text" in data:
if data["text"].find("<@") != -1:
data["text"] = data["text"].replace("<@", "<@")
data["text"] = data["text"].replace(">", ">")
elif "attachments" in data and "text" in data["attachments"][0]:
if data["attachments"][0]["text"].find("<@") != -1:
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace("<@", "<@")
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace(">", ">")
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
})
if hasattr(event, "kwargs") and "html" in event.kwargs and event.kwargs["html"]:
data.update({
"parse": "none",
})
headers = {'Accept': 'text/plain'}
r = requests.post(
SLACK_SEND_URL,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def _map_color(self, color):
# Turn colors into hex values, handling old slack colors, etc
if color == "red":
return "danger"
elif color == "yellow":
return "warning"
elif color == "green":
return "good"
return color
def join_channel(self, channel_id):
return self.client.api_call(
"channels.join",
channel=channel_id,
)
@property
def people(self):
if not hasattr(self, "_people") or self._people is {}:
self._update_people()
return self._people
@property
def default_channel(self):
if not hasattr(self, "_default_channel") or not self._default_channel:
self._decide_default_channel()
return self._default_channel
@property
def channels(self):
if not hasattr(self, "_channels") or self._channels is {}:
self._update_channels()
return self._channels
@property
def client(self):
if not hasattr(self, "_client"):
self._client = SlackClient(settings.SLACK_API_TOKEN)
return self._client
def _decide_default_channel(self):
self._default_channel = None
if not hasattr(self, "complained_about_default"):
self.complained_about_default = False
self.complained_uninvited = False
# Set self.me
self.people
if hasattr(settings, "SLACK_DEFAULT_CHANNEL"):
channel = self.get_channel_from_name(settings.SLACK_DEFAULT_CHANNEL)
if channel:
if self.me.id in channel.members:
self._default_channel = channel.id
return
elif not self.complained_about_default:
self.complained_about_default = True
logging.error("The defined default channel(%s) does not exist!",
settings.SLACK_DEFAULT_CHANNEL)
for c in self.channels.values():
if c.name != c.id and self.me.id in c.members:
self._default_channel = c.id
if not self._default_channel and not self.complained_uninvited:
self.complained_uninvited = True
logging.critical("No channels with me invited! No messages will be sent!")
def _update_channels(self):
channels = {}
for c in self.client.server.channels:
members = {}
for m in c.members:
members[m] = self.people[m]
channels[c.id] = Channel(
id=c.id,
name=c.name,
source=clean_for_pickling(c),
members=members
)
        if len(channels.keys()) == 0:
            # Server isn't set up yet, and we're likely in a processing thread,
            # so fall back to the cached copy if one exists.
if self.load("slack_channel_cache", None):
self._channels = self.load("slack_channel_cache", None)
else:
self._channels = channels
self.save("slack_channel_cache", channels)
def _update_people(self):
people = {}
self.handle = self.client.server.username
for k, v in self.client.server.users.items():
user_timezone = None
if v.tz:
user_timezone = v.tz
people[k] = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if v.name == self.handle:
self.me = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if user_timezone and user_timezone != 'unknown':
people[k].timezone = user_timezone
if v.name == self.handle:
self.me.timezone = user_timezone
        if len(people.keys()) == 0:
            # Server isn't set up yet, and we're likely in a processing thread,
            # so fall back to the cached copy if one exists.
if self.load("slack_people_cache", None):
self._people = self.load("slack_people_cache", None)
if not hasattr(self, "me") or not self.me:
self.me = self.load("slack_me_cache", None)
if not hasattr(self, "handle") or not self.handle:
self.handle = self.load("slack_handle_cache", None)
else:
self._people = people
self.save("slack_people_cache", people)
self.save("slack_me_cache", self.me)
self.save("slack_handle_cache", self.handle)
def _update_backend_metadata(self):
self._update_people()
self._update_channels()
def _watch_slack_rtm(self):
while True:
try:
if self.client.rtm_connect(auto_reconnect=True):
self._update_backend_metadata()
num_polls_between_updates = 30 / settings.EVENT_LOOP_INTERVAL # Every 30 seconds
current_poll_count = 0
while True:
events = self.client.rtm_read()
if len(events) > 0:
# TODO: only handle events that are new.
# print(len(events))
for e in events:
self.handle_incoming_event(e)
                    # Update channels/people/me/etc every 30s or so.
current_poll_count += 1
if current_poll_count > num_polls_between_updates:
self._update_backend_metadata()
current_poll_count = 0
self.sleep_for_event_loop()
except (WebSocketConnectionClosedException, SlackConnectionError):
                logging.error('Encountered connection error; attempting reconnect in 2 seconds')
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
break
except:
logging.critical("Error in watching slack RTM: \n%s" % traceback.format_exc())
break
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
# Property, auto-inits.
self.client
self.rtm_thread = Process(target=self._watch_slack_rtm)
self.rtm_thread.start()
def terminate(self):
if hasattr(self, "rtm_thread"):
self.rtm_thread.terminate()
while self.rtm_thread.is_alive():
time.sleep(0.2)
|
logbeaver.py
|
#!/usr/bin/env python
#coding: utf-8
from sys import stdin
import re
import socket
from urlparse import urlsplit, parse_qs
import argparse
import logging
import threading
import Queue
def main ():
parser = argparse.ArgumentParser()
parser.add_argument('--host', default = '127.0.0.1')
parser.add_argument('--port', default = 8125, type = int)
parser.add_argument('--verbose', '-v', action = 'store_true')
parser.add_argument('--no-parse-fail-warn', '-npw', action = 'store_true')
args = parser.parse_args()
logging.basicConfig(level = logging.INFO, format = '%(asctime)s %(levelname)-5s %(filename)s:%(funcName)s:%(lineno)d %(message)s')
log = logging.getLogger(__name__)
local_fqdn = socket.getfqdn()
log.info('Starting. Using fqdn "%s", statsd "%s:%s"' % (local_fqdn, args.host, args.port))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
send_que = Queue.Queue()
def send_loop ():
while True:
data = send_que.get()
sock.sendto(data, (args.host, args.port))
thr = threading.Thread(target = send_loop)
thr.daemon = True #TODO "If you want your threads to stop gracefully, make them non-daemonic and use a suitable signalling mechanism such as an Event."
thr.start()
# log_format for_logstash '[$time_local] $request_time $upstream_response_time $upstream_cache_status $status'
# ' "$request" $remote_addr "$http_referer" "$http_user_agent"';
line_regexp = re.compile(r'\[.+\] ([^ ]+) \[(.+)\] [^ ]+ ([^ ]+) "([^ ]+) ([^ ]+)')
dispatch_re = re.compile(r'/dispatch/[A-Z\d]+')
stat_re = re.compile(r'/stat/?$')
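    # A hypothetical line the regexp accepts (the real nginx format may differ):
    # [12/Mar/2020:10:00:00 +0000] 0.123 [0.100] MISS 200 "GET /stat/ HTTP/1.1"
    # -> request_time='0.123', upstream_response_time='0.100', status='200',
    #    verb='GET', url='/stat/'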
while True:
line = stdin.readline()
if not line:
break
try:
match = line_regexp.match(line)
if match is None:
if not args.no_parse_fail_warn:
log.warning("failed to match line: %s" % repr(line))
continue
request_time, upstream_response_time, status, verb, url = match.groups()
assert request_time != '-', line
batch_data = [
"logstash.%s.nginx_access.response.%s:1|c" % (local_fqdn, status),
"logstash.%s.nginx_access.request_time:%s|ms" % (local_fqdn, request_time),
]
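            # statsd wire format: "name:value|c" is a counter increment,
            # "name:value|ms" a timing sample in milliseconds.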
#handle nginx upstream retries (defaults + fail_timeout=0)
#TODO think more
if upstream_response_time != '-':
v = [float(v) for v in upstream_response_time.split(', ')]
m = max(v)
assert m == v[-1], line
upstream_response_time = m
num = len(v) - 1
if num:
batch_data.append("logstash.%s.nginx_access.upstream_retries:%s|c" % (local_fqdn, num))
if upstream_response_time != '-':
batch_data.append("logstash.%s.nginx_access.upstream_response_time:%s|ms" % (local_fqdn, upstream_response_time))
url_parts = urlsplit(url)
keep_empty = True
strict_parse = False #TODO with strict parsing fails on /com/site/ACF042EDF74FB312392E4BB059A9E8C0AF0EAE9AA28ED6D7?0.7257665740326047
query = parse_qs(url_parts.query, keep_empty, strict_parse) if url_parts.query else None
match = dispatch_re.match(url_parts.path)
if match:
if query:
#TODO refactor
v = query.get('version')
if v: v = v[0]
v = v or 'not_set'
if v: int(v) #TODO proper checks/escape
d = query.get('dispatcher')
if d: d = d[0]
d = d or 'not_set'
if d: int(d) #TODO
else:
v = d = 'not_set'
batch_data.append("logstash.%s.nginx_access.dispatch_ver.dispatcher.%s:1|c" % (local_fqdn, d))
batch_data.append("logstash.%s.nginx_access.dispatch_ver.version.%s:1|c" % (local_fqdn, v))
else:
match = stat_re.match(url_parts.path)
if match:
if upstream_response_time != '-':
batch_data.append("logstash.%s.nginx_access_bapi_stat.response.%s:1|c" % (local_fqdn, status))
batch_data.append("logstash.%s.nginx_access_bapi_stat.upstream_response_time:%s|ms" % (local_fqdn, upstream_response_time))
else:
log.warning("upstream_response_time == '-': %s" % repr(line))
if args.verbose:
#TODO better use debug level instead of flag (or raise level with flag)?
log.info(batch_data)
data = "\n".join(batch_data)
# print len(data) #TODO https://github.com/etsy/statsd/blob/master/docs/metric_types.md#multi-metric-packets
send_que.put(data)
except:
log.error("error while processing line: %s" % repr(line))
raise
if __name__ == '__main__':
main()
|
runner.py
|
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Queue
import argparse
import json
import logging
import os
import re
import struct
import subprocess
import sys
import threading
import time
from collections import OrderedDict
import google.protobuf.text_format
import grpc
from p4.tmp import p4config_pb2
from p4.v1 import p4runtime_pb2
from lib.bmv2 import Bmv2Switch
PTF_ROOT = os.path.dirname(os.path.realpath(__file__))
BMV2_LOG = PTF_ROOT + "/bmv2.log"
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("PTF runner")
def error(msg, *args, **kwargs):
logger.error(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
    logger.warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
logger.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
logger.debug(msg, *args, **kwargs)
def check_ifaces(ifaces):
"""
Checks that required interfaces exist.
"""
ifconfig_out = subprocess.check_output(['ifconfig'])
iface_list = re.findall(r'^([a-zA-Z0-9]+)', ifconfig_out, re.S | re.M)
present_ifaces = set(iface_list)
ifaces = set(ifaces)
return ifaces <= present_ifaces
def build_bmv2_config(bmv2_json_path):
"""
Builds the device config for BMv2
"""
device_config = p4config_pb2.P4DeviceConfig()
device_config.reassign = True
with open(bmv2_json_path) as f:
device_config.device_data = f.read()
return device_config
def build_tofino_config(prog_name, bin_path, cxt_json_path):
device_config = p4config_pb2.P4DeviceConfig()
with open(bin_path, 'rb') as bin_f:
with open(cxt_json_path, 'r') as cxt_json_f:
device_config.device_data = ""
device_config.device_data += struct.pack("<i", len(prog_name))
device_config.device_data += prog_name
tofino_bin = bin_f.read()
device_config.device_data += struct.pack("<i", len(tofino_bin))
device_config.device_data += tofino_bin
cxt_json = cxt_json_f.read()
device_config.device_data += struct.pack("<i", len(cxt_json))
device_config.device_data += cxt_json
return device_config
def update_config(p4info_path, bmv2_json_path, tofino_bin_path,
tofino_cxt_json_path, grpc_addr, device_id):
"""
Performs a SetForwardingPipelineConfig on the device
"""
channel = grpc.insecure_channel(grpc_addr)
stub = p4runtime_pb2.P4RuntimeStub(channel)
debug("Sending P4 config")
# Send master arbitration via stream channel
# This should go in library, to be re-used also by base_test.py.
stream_out_q = Queue.Queue()
stream_in_q = Queue.Queue()
def stream_req_iterator():
while True:
p = stream_out_q.get()
if p is None:
break
yield p
def stream_recv(stream):
for p in stream:
stream_in_q.put(p)
def get_stream_packet(type_, timeout=1):
start = time.time()
try:
while True:
remaining = timeout - (time.time() - start)
if remaining < 0:
break
msg = stream_in_q.get(timeout=remaining)
if not msg.HasField(type_):
continue
return msg
        except Queue.Empty:  # timeout expired
pass
return None
stream = stub.StreamChannel(stream_req_iterator())
stream_recv_thread = threading.Thread(target=stream_recv, args=(stream,))
stream_recv_thread.start()
req = p4runtime_pb2.StreamMessageRequest()
arbitration = req.arbitration
arbitration.device_id = device_id
election_id = arbitration.election_id
election_id.high = 0
election_id.low = 1
stream_out_q.put(req)
rep = get_stream_packet("arbitration", timeout=5)
if rep is None:
error("Failed to establish handshake")
return False
try:
# Set pipeline config.
request = p4runtime_pb2.SetForwardingPipelineConfigRequest()
request.device_id = device_id
election_id = request.election_id
election_id.high = 0
election_id.low = 1
config = request.config
with open(p4info_path, 'r') as p4info_f:
google.protobuf.text_format.Merge(p4info_f.read(), config.p4info)
if bmv2_json_path is not None:
device_config = build_bmv2_config(bmv2_json_path)
else:
device_config = build_tofino_config("name", tofino_bin_path,
tofino_cxt_json_path)
config.p4_device_config = device_config.SerializeToString()
request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
try:
stub.SetForwardingPipelineConfig(request)
except Exception as e:
error("Error during SetForwardingPipelineConfig")
error(str(e))
return False
return True
finally:
stream_out_q.put(None)
stream_recv_thread.join()
def run_test(p4info_path, grpc_addr, device_id, cpu_port, ptfdir, port_map_path,
platform=None, extra_args=()):
"""
Runs PTF tests included in provided directory.
    Device must be running and configured with appropriate P4 program.
"""
# TODO: check schema?
# "ptf_port" is ignored for now, we assume that ports are provided by
# increasing values of ptf_port, in the range [0, NUM_IFACES[.
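    # For reference, a port map file compatible with the loop below might look
    # like this (interface names are hypothetical):
    # [
    #   {"ptf_port": 0, "p4_port": 0, "iface_name": "veth0"},
    #   {"ptf_port": 1, "p4_port": 1, "iface_name": "veth2"}
    # ]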
port_map = OrderedDict()
with open(port_map_path, 'r') as port_map_f:
port_list = json.load(port_map_f)
for entry in port_list:
p4_port = entry["p4_port"]
iface_name = entry["iface_name"]
port_map[p4_port] = iface_name
if not check_ifaces(port_map.values()):
error("Some interfaces are missing")
return False
ifaces = []
# FIXME
# find base_test.py
pypath = os.path.dirname(os.path.abspath(__file__))
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] += ":" + pypath
else:
os.environ['PYTHONPATH'] = pypath
for iface_idx, iface_name in port_map.items():
ifaces.extend(['-i', '{}@{}'.format(iface_idx, iface_name)])
cmd = ['ptf']
cmd.extend(['--test-dir', ptfdir])
cmd.extend(ifaces)
test_params = 'p4info=\'{}\''.format(p4info_path)
test_params += ';grpcaddr=\'{}\''.format(grpc_addr)
test_params += ';device_id=\'{}\''.format(device_id)
test_params += ';cpu_port=\'{}\''.format(cpu_port)
if platform is not None:
test_params += ';pltfm=\'{}\''.format(platform)
cmd.append('--test-params={}'.format(test_params))
cmd.extend(extra_args)
debug("Executing PTF command: {}".format(' '.join(cmd)))
try:
# we want the ptf output to be sent to stdout
p = subprocess.Popen(cmd)
p.wait()
    except Exception:
error("Error when running PTF tests")
return False
return p.returncode == 0
def check_ptf():
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(['ptf', '--version'],
stdout=devnull, stderr=devnull)
return True
    except subprocess.CalledProcessError:
        # "ptf --version" exited with an error code, but the executable exists.
        return True
except OSError: # PTF not found
return False
# noinspection PyTypeChecker
def main():
parser = argparse.ArgumentParser(
description="Compile the provided P4 program and run PTF tests on it")
parser.add_argument('--device',
help='Target device',
type=str, action="store", required=True,
choices=['tofino', 'bmv2', 'stratum-bmv2'])
parser.add_argument('--p4info',
help='Location of p4info proto in text format',
type=str, action="store", required=True)
parser.add_argument('--bmv2-json',
help='Location BMv2 JSON output from p4c (if target is bmv2)',
type=str, action="store", required=False)
parser.add_argument('--tofino-bin',
help='Location of Tofino .bin output from p4c (if target is tofino)',
type=str, action="store", required=False)
parser.add_argument('--tofino-ctx-json',
help='Location of Tofino context.json output from p4c (if target is tofino)',
type=str, action="store", required=False)
parser.add_argument('--grpc-addr',
help='Address to use to connect to P4 Runtime server',
type=str, default='localhost:50051')
parser.add_argument('--device-id',
help='Device id for device under test',
type=int, default=1)
parser.add_argument('--cpu-port',
help='CPU port ID of device under test',
type=int, required=True)
parser.add_argument('--ptf-dir',
help='Directory containing PTF tests',
type=str, required=True)
parser.add_argument('--port-map',
help='Path to JSON port mapping',
type=str, required=True)
parser.add_argument('--platform',
help='Target platform on which tests are run (if target is tofino)',
type=str, required=False)
parser.add_argument('--skip-config',
help='Assume a device with pipeline already configured',
action="store_true", default=False)
parser.add_argument('--skip-test',
help='Skip test execution (useful to perform only pipeline configuration)',
action="store_true", default=False)
parser.add_argument('--skip-bmv2-start',
help='Skip switch start (requires that the switch be started manually \
                        beforehand, only applies to bmv2 and stratum-bmv2 targets)',
action="store_true", default=False)
args, unknown_args = parser.parse_known_args()
if not check_ptf():
error("Cannot find PTF executable")
sys.exit(1)
device = args.device
bmv2_json = None
tofino_ctx_json = None
tofino_bin = None
if not os.path.exists(args.p4info):
error("P4Info file {} not found".format(args.p4info))
sys.exit(1)
if device == 'tofino':
if not os.path.exists(args.tofino_bin):
error("Tofino binary config file {} not found".format(
args.tofino_bin))
sys.exit(1)
if not os.path.exists(args.tofino_ctx_json):
error("Tofino context json file {} not found".format(
args.tofino_ctx_json))
sys.exit(1)
tofino_bin = args.tofino_bin
tofino_ctx_json = args.tofino_ctx_json
elif device == 'bmv2' or device == 'stratum-bmv2':
if not os.path.exists(args.bmv2_json):
error("BMv2 json file {} not found".format(args.bmv2_json))
sys.exit(1)
bmv2_json = args.bmv2_json
if not os.path.exists(args.port_map):
print "Port map path '{}' does not exist".format(args.port_map)
sys.exit(1)
grpc_port = args.grpc_addr.split(':')[1]
bmv2_sw = None
    if not args.skip_bmv2_start:
if device == 'bmv2':
bmv2_sw = Bmv2Switch(device_id=args.device_id,
port_map_path=args.port_map,
grpc_port=grpc_port,
cpu_port=args.cpu_port,
loglevel='debug',
logfile_path=BMV2_LOG)
bmv2_sw.start()
elif device == 'stratum-bmv2':
bmv2_sw = Bmv2Switch(device_id=args.device_id,
port_map_path=args.port_map,
grpc_port=grpc_port,
cpu_port=args.cpu_port,
loglevel='debug',
is_stratum=True,
logfile_path=BMV2_LOG)
bmv2_sw.start()
try:
success = True
if not args.skip_config:
success = update_config(p4info_path=args.p4info,
bmv2_json_path=bmv2_json,
tofino_bin_path=tofino_bin,
tofino_cxt_json_path=tofino_ctx_json,
grpc_addr=args.grpc_addr,
device_id=args.device_id)
if not success:
if bmv2_sw is not None:
bmv2_sw.kill()
sys.exit(2)
if not args.skip_test:
success = run_test(p4info_path=args.p4info,
device_id=args.device_id,
grpc_addr=args.grpc_addr,
cpu_port=args.cpu_port,
ptfdir=args.ptf_dir,
port_map_path=args.port_map,
platform=args.platform,
extra_args=unknown_args)
if bmv2_sw is not None:
bmv2_sw.kill()
if not success:
sys.exit(3)
except Exception:
if bmv2_sw is not None:
bmv2_sw.kill()
raise
if __name__ == '__main__':
main()
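# A sketch of a typical invocation (the script name, paths, and the CPU port
# value below are hypothetical, not taken from this file):
#   python ptf_runner.py --device bmv2 --p4info build/p4info.txt \
#       --bmv2-json build/bmv2.json --cpu-port 255 \
#       --ptf-dir ptf/tests --port-map ptf/port_map.json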
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.2
MODEL_NAME = 'hand_data_model'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that are used to add the correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph(path=PATH_TO_CKPT):
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.compat.v2.io.gfile.GFile(path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.compat.v1.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
    i = np.argmax(scores)
    # for i in range(num_hands_detect):
    if (scores[i] > score_thresh):
        (left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
                                      boxes[i][0] * im_height, boxes[i][2] * im_height)
        p1 = (int(left), int(top))
        p2 = (int(right), int(bottom))
        # cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
        height_mid = (top + bottom) / 2
        width_mid = (right + left) / 2
        # Half-size of the square crop: 0.75 times the larger box dimension.
        # In pixel coordinates top < bottom, so the box height is bottom - top.
        if (bottom - top) >= (right - left):
            length = (bottom - top) * 0.75
        else:
            length = (right - left) * 0.75
        l1, l2, _ = np.shape(image_np)
        crop_image = image_np[max(0, int(height_mid - length)):min(l1, int(height_mid + length)),
                              max(0, int(width_mid - length)):min(l2, int(width_mid + length))]
        return image_np[int(top):int(bottom), int(left):int(right)]
        # return crop_image
def save_box(file,score_thresh, scores, boxes, im_width, im_height,image_np):
if (max(scores) > score_thresh):
i = np.argmax(scores)
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
with open(file,'w') as w:
w.write(str(int(left))+'\n')
w.write(str(int(top))+'\n')
w.write(str(int(right))+'\n')
w.write(str(int(bottom))+'\n')
def convert_box(label,score_thresh, scores, boxes, im_width, im_height,image_np):
if (max(scores) > score_thresh):
i = np.argmax(scores)
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
print(left,right,bottom,top)
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
lr = (right-left)/2
tb = (bottom-top)/2
left = max(0,left-lr)
right = min(right+lr,im_width)
bottom = min(bottom+ tb,im_height)
top = max(0,top-tb)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
print(left,right,bottom,top)
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
crop_image = image_np[int(top):int(bottom),int(left):int(right)]
cv2.imwrite('abc.jpg',cv2.cvtColor(crop_image, cv2.COLOR_RGB2BGR))
return [{'label_name':label,'x1':int(left),'x2':int(right),'y1':int(top),'y2':int(bottom)}]
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection: generate scores and bounding boxes given an image.
def detect_objects(image_np, detection_graph, sess):
    # Define input and output tensors for detection_graph.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores),np.squeeze(classes),np.squeeze(num)
def classifier(imagenp,graph,sess):
image_tensor = graph.get_tensor_by_name('image_input:0')
output = graph.get_tensor_by_name('predictions/Softmax:0')
pred = sess.run([output],feed_dict={image_tensor:imagenp})
return pred
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
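# A minimal end-to-end usage sketch (not part of the original module). It
# assumes a webcam at index 0 and the frozen graph paths defined above.
if __name__ == '__main__':
    graph, sess = load_inference_graph()
    video = WebcamVideoStream(src=0, width=320, height=240).start()
    w, h = video.size()
    while True:
        frame = video.read()
        if frame is None:
            continue
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes, scores, classes, num = detect_objects(rgb, graph, sess)
        draw_box_on_image(1, _score_thresh, scores, boxes, w, h, rgb)
        cv2.imshow('detection', cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video.stop()
    cv2.destroyAllWindows()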
|
supervisor.py
|
from dataclasses import dataclass
import multiprocessing
import threading
import time
import typing as tp
from pypeln import utils as pypeln_utils
from .worker import Worker
from .queue import IterableQueue
@dataclass
class Supervisor:
workers: tp.List[Worker]
main_queue: IterableQueue
done: bool = False
def __call__(self):
try:
workers = [worker for worker in self.workers if worker.timeout > 0]
if not workers:
return
while not self.done:
for worker in workers:
if worker.did_timeout():
worker.stop()
worker.start()
time.sleep(0.01)
except BaseException as e:
self.main_queue.raise_exception(e)
    def __enter__(self):
        self.start()
        return self
def __exit__(self, *args):
self.done = True
for worker in self.workers:
worker.stop()
while any(worker.process.is_alive() for worker in self.workers):
time.sleep(pypeln_utils.TIMEOUT)
def start(self):
for worker in self.workers:
worker.start()
t = threading.Thread(target=self)
t.daemon = True
t.start()
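# A minimal sketch (not from the original module) of the interface Supervisor
# expects from a worker: timeout, did_timeout(), start(), stop(), and a
# process attribute with is_alive(). The stub below is hypothetical.
if __name__ == "__main__":

    class _StubProcess:
        def is_alive(self):
            return False

    class _StubWorker:
        timeout = 1.0
        process = _StubProcess()

        def did_timeout(self):
            return False

        def start(self):
            print("worker started")

        def stop(self):
            print("worker stopped")

    # main_queue is only used to propagate exceptions, so None suffices here.
    with Supervisor(workers=[_StubWorker()], main_queue=None):
        time.sleep(0.05)  # simulated work; __exit__ stops the workers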
|
sensor.py
|
import pandas as pd
import serial
import time
import datetime
import numpy as np
import threading
import os
from serial import SerialException
from queue import Queue
from threading import Thread
from datetime import timedelta
from datetime import datetime
class CT4319():
def __init__(self):
self.ser = serial.Serial()
self.q = Queue()
self.t_stop = threading.Event()
self.t = Thread(target=self.read_thread, args=(self.ser, self.q, self.t_stop))
        self.t.daemon = True
self.output_dir = ""
self.conductivity = []
self.temperature = []
self.conductance = []
self.salinity = []
self.density = []
self.soundspeed = []
self.rawdata = []
self.scan = 0
self.last_output = 0
self.last_len_mean = 0
        self.data = pd.DataFrame(columns = ["Scan", "Time", "Temperature[°C]", "Conductivity[mS/cm]",
                                            "Salinity[PSU]", "Density[kg/m3]", "Sound Speed[m/s]", "Conductance[mS]"])
        self.data_mean = pd.DataFrame(columns = ["Scans", "Initial Time", "Final Time", "Temperature[°C]",
                                                 "Conductivity[mS/cm]", "Salinity[PSU]", "Density[kg/m3]",
                                                 "Sound Speed[m/s]", "Conductance[mS]"])
self.properties = {"Product Name":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"AADI Product name",
"Access Protection":"Read Only"},
"Product Number":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":6,
"Use":"AADI Product number",
"Access Protection":"Read Only"},
"Serial Number":{"Value":"",
"Last Value Update":"",
"Type":"INT",
"No of elements":1,
"Use":"Serial Number",
"Access Protection":"Read Only"},
"SW ID":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":11,
"Use":"Unique identifier for internal firmware",
"Access Protection":"Read Only"},
"Software Version":{"Value":"",
"Last Value Update":"",
"Type":"INT",
"No of elements":3,
"Use":"Software version (Major, Minor, Built)",
"Access Protection":"Read Only"},
"HW ID X":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":19,
"Use":"Hardware Identifier, X =1..3",
"Access Protection":"Read Only"},
"HW Version X":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":9,
"Use":"Hardware Identifier, X =1..3",
"Access Protection":"Read Only"},
"System Control":{"Value":"",
"Last Value Update":"",
"Type":"INT",
"No of elements":3,
"Use":"For AADI service personnel only",
"Access Protection":"Read Only"},
"Production Date":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"AADI production date, format YYYY-MM-DD",
"Access Protection":"Read Only"},
"Last Service":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"Last service date, format YYYY-MM-DD, empty by default",
"Access Protection":"Read Only"},
"Last Calibration":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"Last calibration date, format YYYY-MM-DD",
"Access Protection":"Read Only"},
"Calibration Interval":{"Value":"",
"Last Value Update":"",
"Type":"INT",
"No of elements":1,
"Use":"Recommended calibration interval in days",
"Access Protection":"Read Only"},
"Interval":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":1,
"Use":"Sampling Interval in seconds",
"Access Protection":"Low"},
"Location":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"User setting for location",
"Access Protection":"Low"},
"Geographic Position":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"User setting for geographic position",
"Access Protection":"Low"},
"Vertical Position":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":1,
"Use":"User setting for describing sensor position",
"Access Protection":"Low"},
"Reference":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"User setting for describing sensor reference",
"Access Protection":"Low"},
"Pressure":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":1,
"Use":"Water pressure in kPa",
"Access Protection":"High"},
"Mode":{"Value":"",
"Last Value Update":"",
"Type":"ENUM",
"No of elements":1,
"Use":"Sets the sensor operation mode (AiCaP, Smart Sensor Terminal, AADI Real-Time, Smart Sensor Terminal FW2)",
"Access Protection":"High"},
"Enable Sleep":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Enable sleep modeSets the sensor operation mode (AiCaP, Smart Sensor Terminal, AADI Real-Time, Smart Sensor Terminal FW2)",
"Access Protection":"High"},
"Enable Polled Mode":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Enable polled mode (for RS232). When set to ‘no’ the sensor will sample at the interval given by the Interval property. When set to ‘yes’ the sensor will wait for a Do Sample command.",
"Access Protection":"High"},
"Enable Text":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Controls the insertion of descriptive text, i.e. parameter names",
"Access Protection":"High"},
"Enable Decimalformat":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Controls the use of decimal format in the output string",
"Access Protection":"High"},
"Enable Temperature":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Controls inclusion of Temperature in the output string",
"Access Protection":"High"},
"Enable Derived Parameters":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Controls inclusion of Salinity, Density and Speed of sound in the output string",
"Access Protection":"High"},
"Enable Rawdata":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Controls inclusion of Conductivity in the output string",
"Access Protection":"High"},
"Node Description":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"User text for describing node, placement etc",
"Access Protection":"High"},
"Owner":{"Value":"",
"Last Value Update":"",
"Type":"String",
"No of elements":31,
"Use":"User setting for owner",
"Access Protection":"High"},
"Baudrate":{"Value":"",
"Last Value Update":"",
"Type":"ENUM",
"No of elements":1,
"Use":"RS232 baudrate: 4800, 9600, 57600, or 115200. Default baudrate is 9600",
"Access Protection":"High"},
"Flow Control":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"RS232 flow control: ‘None’ or ‘Xon/Xoff’",
"Access Protection":"High"},
"Enable Comm Indicator":{"Value":"",
"Last Value Update":"",
"Type":"BOOL",
"No of elements":1,
"Use":"Enable communication sleep (’%’) and communication ready (‘!’) indicators",
"Access Protection":"High"},
"Comm TimeOut":{"Value":"",
"Last Value Update":"",
"Type":"ENUM",
"No of elements":1,
"Use":"RS232 communication activation timeout: Always On,10 s,20 s,30 s,1 min,2 min,5 min,10 min",
"Access Protection":"High"},
"TempCoef":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":6,
"Use":"Curve fitting coefficients for the temp measurements.",
"Access Protection":"High"},
"R0Coef0":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 0",
"Access Protection":"High"},
"R0Coef1":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 0",
"Access Protection":"High"},
"R0Coef2":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 0",
"Access Protection":"High"},
"R0Coef3":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 0",
"Access Protection":"High"},
"R0Coef4":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 0",
"Access Protection":"High"},
"R1Coef0":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef1":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef2":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef3":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef4":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef5":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef6":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef7":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef8":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"R1Coef9":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":4,
"Use":"Temp Coefficients for Loop reading to Conductance, Range 1",
"Access Protection":"High"},
"CellCoef":{"Value":"",
"Last Value Update":"",
"Type":"Float",
"No of elements":1,
"Use":"Cell constant for converting mS to mS/cm",
"Access Protection":"High"},
"Range":{"Value":"",
"Last Value Update":"",
"Type":"INT",
"No of elements":1,
"Use":"Range setting: -1=Auto range, 0=Low range, 1=High range",
"Access Protection":"High"}}
def start_comm(self, port, baudrate):
try:
self.ser.port = port
self.ser.baudrate = baudrate
self.ser.open()
self.t.start()
except SerialException:
print("Error communicating with the device!")
print("Make sure the device and baudrate are correct!")
print("Device = " + port)
print("Baudrate = " + str(baudrate))
def close_comm(self):
self.t_stop.set()
self.ser.close()
def send_cmd(self, cmd):
if self.ser.isOpen():
self.ser.write(cmd.encode())
def read_thread(self, ser, q, stop_event):
while (not stop_event.is_set()):
if ser.isOpen():
q.put([str(ser.readline()), datetime.now()])
time.sleep(0.01)
def read_serialbuff(self):
data = []
while self.q.qsize() > 0:
data.append(self.q.get())
return data
def clean_serialbuff(self):
while len(self.read_serialbuff()) > 0:
pass
def get_property(self, prop):
if prop not in self.properties.keys():
return "Property not found!"
else:
self.send_cmd("Set Passkey(1000)\r\n")
self.clean_serialbuff()
self.send_cmd("Get "+prop+"\r\n")
time.sleep(0.2)
ret = self.read_serialbuff()
for line in ret:
line_list = line[0].replace("b'","").replace("\\r\\n'","").replace("\\x13\\x11","").split("\\t")
for item in line_list:
if prop.lower() == item.lower():
self.properties[prop]["Value"] = line_list[3:]
self.properties[prop]["Last Value Update"] = line[1]
elif "ERROR" == item.lower():
return line_list[-1]
self.send_cmd("Set Passkey(1)\r\n")
return self.properties[prop]
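    # For reference, the reply parsed above is expected to be a tab-separated
    # line roughly of the form (hypothetical values):
    #   b'Get\tInterval\tFloat\t10\r\n'
    # where everything from the fourth field onwards is stored as the value.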
def get_all_property(self):
for prop in self.properties.keys():
self.get_property(prop)
return self.properties
def set_outputdir(self, output_dir):
if os.path.isdir(output_dir):
if "/" in output_dir[-1]:
self.output_dir = output_dir
else:
self.output_dir = output_dir+"/"
else:
print("Diretório invalido!")
def to_csv(self, data_mean = False, data = False):
if self.output_dir is not "":
if data:
if self.last_output < self.data["Scan"].iloc[-1]:
df = self.data[self.data["Scan"] > self.last_output]
self.last_output = self.data["Scan"].iloc[-1]
else:
df = self.data
csvFilePath = self.output_dir+"td263data.csv"
if not os.path.isfile(csvFilePath):
df.to_csv(csvFilePath, mode='a', index=False)
else:
df.to_csv(csvFilePath, mode='a', index=False, header=False)
if data_mean:
if self.last_len_mean < len(self.data_mean):
df = self.data_mean[self.last_len_mean:]
self.last_len_mean = len(self.data_mean)
else:
df = self.data_mean
csvFilePath = self.output_dir+"td263data_mean.csv"
if not os.path.isfile(csvFilePath):
df.to_csv(csvFilePath, mode='a', index=False)
else:
df.to_csv(csvFilePath, mode='a', index=False, header=False)
else:
print("Não foi configurado um diretório de destino!")
print("Use a função set_outputdir(output_dir).")
def do_measurement(self):
self.clean_serialbuff()
self.send_cmd("Do Sample\r\n")
time.sleep(1)
ret = self.read_serialbuff()
for line in ret:
line_list = line[0].replace("b'","").replace("\\r\\n'","").replace("\\x13\\x11","").split("\\t")
if "MEASUREMENT" in line_list[0]:
self.conductivity.append([float(line_list[4]), line[1]])
self.temperature.append([float(line_list[6]), line[1]])
self.salinity.append([float(line_list[8]), line[1]])
self.density.append([float(line_list[10]), line[1]])
self.soundspeed.append([float(line_list[12]), line[1]])
self.conductance.append([float(line_list[14]), line[1]])
                self.data.loc[self.scan] = {"Scan": self.scan, "Time": line[1],
                                            "Temperature[°C]": float(line_list[6]),
                                            "Conductivity[mS/cm]": float(line_list[4]),
                                            "Salinity[PSU]": float(line_list[8]),
                                            "Density[kg/m3]": float(line_list[10]),
                                            "Sound Speed[m/s]": float(line_list[12]),
                                            "Conductance[mS]": float(line_list[14])}
self.scan += 1
def do_mean(self, dt=timedelta(minutes=0)):
init = self.scan
start = datetime.now()
while (datetime.now() - start) < dt:
self.do_measurement()
data = self.data[init:]
scan = len(self.data_mean)+1
        self.data_mean.loc[scan] = {"Scans": (self.scan-init), "Initial Time": start, "Final Time": datetime.now(),
                                    "Temperature[°C]": data["Temperature[°C]"].mean(),
                                    "Conductivity[mS/cm]": data["Conductivity[mS/cm]"].mean(),
                                    "Salinity[PSU]": data["Salinity[PSU]"].mean(),
                                    "Density[kg/m3]": data["Density[kg/m3]"].mean(),
                                    "Sound Speed[m/s]": data["Sound Speed[m/s]"].mean(),
                                    "Conductance[mS]": data["Conductance[mS]"].mean()}
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import shutil
import threading
import unittest
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class KerasCallbacksTest(test.TestCase):
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.test_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.test_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
      assert abs(float(keras.backend.get_value(model.optimizer.lr)) - 0.2
                 ) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
epsilon=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)),
0.01,
atol=1e-4)
def test_CSVLogger(self):
with self.test_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
with open(filepath) as csvfile:
output = ' '.join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
with self.test_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, due to \r\n line endings we may end up reading empty
          # lines after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(keras.layers.Dense(2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks, epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
# case: Sequential
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = keras.callbacks.TensorBoard(
log_dir=temp_dir, histogram_freq=1, write_images=True,
write_grads=True, batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0)
# fit generator with validation data and accuracy
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
# fit generator without validation data and accuracy
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks)
assert os.path.exists(temp_dir)
def test_TensorBoard_histogram_freq_must_have_validation_data(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
with self.test_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
else:
yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
i %= max_batch_index
inp = keras.Input((INPUT_DIM,))
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(
log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit w/o validation data should raise ValueError if histogram_freq > 0
cbs = callbacks_factory(histogram_freq=1)
with self.assertRaises(ValueError):
model.fit(x_train, y_train, batch_size=BATCH_SIZE,
callbacks=cbs, epochs=3)
for cb in cbs:
cb.on_train_end()
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
cbs = callbacks_factory(histogram_freq=1)
with self.assertRaises(ValueError):
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=cbs)
for cb in cbs:
cb.on_train_end()
# fit generator with validation data generator should raise ValueError if
# histogram_freq > 0
cbs = callbacks_factory(histogram_freq=1)
with self.assertRaises(ValueError):
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=data_generator(False),
validation_steps=1,
callbacks=cbs)
for cb in cbs:
cb.on_train_end()
# Make sure file writer cache is clear to avoid failures during cleanup.
writer_cache.FileWriterCache.clear()
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
with self.test_session():
filepath = os.path.join(tmpdir, 'logs')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
else:
yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
i += 1
i %= max_batch_index
inp1 = keras.Input((INPUT_DIM,))
inp2 = keras.Input((INPUT_DIM,))
inp = keras.layers.add([inp1, inp2])
hidden = keras.layers.Dense(2, activation='relu')(inp)
hidden = keras.layers.Dropout(0.1)(hidden)
output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
model = keras.models.Model([inp1, inp2], [output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [keras.callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(x_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(x_train), epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.test_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.5, patience=4, verbose=1),
keras.callbacks.TensorBoard(log_dir=temp_dir)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert os.path.exists(temp_dir)
if __name__ == '__main__':
test.main()
|
launch_dataset_dynamic.py
|
import random
import numpy as np
import time
import pickle
from multiprocessing import Process, Manager
from .instance_mcnf import generate_instance, mutate_instance
from .mcnf_dynamic import *
def launch_dataset(global_path, dataset_name, algorithm_list, nb_repetitions, nb_workers, duration_before_timeout):
    # Launches all the algorithms to test on the instances present in the dataset directory.
    # The number of times the algorithms are launched is decided by nb_repetitions.
    # nb_workers is the number of parallel processes used.
    # If an algorithm takes more than duration_before_timeout to finish, its process is killed.
# Open the file containing the name of the instances
instance_name_file = open(global_path + "/Dynamic_mcnf_paper_code/instance_files_dynamic/" + dataset_name + "/instance_name_file.p", "rb" )
instance_name_list = pickle.load(instance_name_file)
instance_name_file.close()
log_file = open(global_path + "/Dynamic_mcnf_paper_code/log_file.txt", 'w')
log_file.write("Start\n")
log_file.close()
manager = Manager()
worker_list = []
    # Dictionary of results
result_dict = {algorithm_name : {instance_name : [None]*nb_repetitions for instance_name in instance_name_list} for algorithm_name in algorithm_list}
# List of jobs to be done
computation_list = [(repetition_index, instance_index, instance_name, algorithm_name) for repetition_index in range(nb_repetitions)
for instance_index, instance_name in enumerate(instance_name_list)
for algorithm_name in algorithm_list]
while len(computation_list) + len(worker_list) > 0:
remaining_worker_list = []
# Cleaning finished jobs and killing jobs lasting more than duration_before_timeout
for process, start_time, return_list, computation_info in worker_list:
repetition_index, instance_index, instance_name, algorithm_name = computation_info
if not process.is_alive():
result_dict[algorithm_name][instance_name][repetition_index] = return_list[0]
elif time.time() > start_time + duration_before_timeout:
process.terminate()
result_dict[algorithm_name][instance_name][repetition_index] = (None, None, None, None, None, duration_before_timeout)
else:
remaining_worker_list.append((process, start_time, return_list, computation_info))
worker_list = remaining_worker_list
# Launching new jobs if possible
if len(worker_list) < nb_workers and len(computation_list) > 0:
computation_info = computation_list.pop(0)
repetition_index, instance_index, instance_name, algorithm_name = computation_info
print_string = "repetition : {0}/{1}, instance : {2}/{3}, algorithm : {4}".format(repetition_index, nb_repetitions, instance_index, len(instance_name_list), algorithm_name)
instance_file_path = global_path + "/Dynamic_mcnf_paper_code/instance_files_dynamic/" + dataset_name + "/" + instance_name + ".p"
return_list = manager.list()
process = Process(target=launch_solver_on_instance, args=(instance_file_path, algorithm_name, print_string, global_path, return_list))
start_time = time.time()
process.start()
worker_list.append((process, start_time, return_list, computation_info))
# Write the results in a file
result_file = open(global_path + "/Dynamic_mcnf_paper_code/instance_files_dynamic/" + dataset_name + "/result_file.p", "wb" )
pickle.dump(result_dict, result_file)
result_file.close()
import datetime
log_file = open(global_path + "/Dynamic_mcnf_paper_code/log_file.txt", 'a')
log_file.write(datetime.datetime.now().__str__())
log_file.close()
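# For reference: each entry of result_dict[algorithm_name][instance_name] is a
# 6-tuple (total_path_changes, min_nb_of_path_changes, path_changes_ratio,
# total_overload, overload_ratio, computing_time); the metric fields are None
# when the computation timed out.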
def launch_solver_on_instance(instance_file_path, algorithm_name, print_string, global_path, return_list):
    # Launch the algorithm named algorithm_name on the instance stored in the file at instance_file_path
print(print_string)
# Read the instance in the instance file
instance_file = open(instance_file_path, "rb" )
instance_list, initial_path_list = pickle.load(instance_file)
instance_file.close()
initial_graph, initial_commodity_list = instance_list.pop(0)
nb_commodities = len(initial_commodity_list)
nb_timesteps = len(instance_list)
nb_nodes = len(initial_graph)
print(len(initial_graph))
print(nb_commodities)
temp = time.time()
# Launch the chosen algorithm
if algorithm_name == "SRR arc node" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, SRR_arc_node_one_timestep, verbose=0)
if algorithm_name == "SRR arc path" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, SRR_arc_path_one_timestep, verbose=0)
if algorithm_name == "SRR arc node no penalization" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, SRR_arc_node_one_timestep, solver_keyword_arguments={"flow_penalisation" : 0}, verbose=0)
if algorithm_name == "SRR arc path no penalization" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, SRR_arc_path_one_timestep, solver_keyword_arguments={"flow_penalisation" : 0}, verbose=0)
if algorithm_name == "SRR restricted" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, SRR_arc_path_one_timestep, solver_keyword_arguments={"nb_path_generations" : 0}, verbose=0)
if algorithm_name == "SRR restricted multi-time-step" : results_list = SRR_arc_path2(instance_list, initial_path_list, verbose=0)
if algorithm_name == "B&B restricted short" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, Branch_and_Bound_arc_path_one_timestep, solver_keyword_arguments={"time_limit" : 1.7 ** (np.sqrt(nb_nodes)) / 50, "nb_threads" : 1}, verbose=0)
if algorithm_name == "B&B restricted medium" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, Branch_and_Bound_arc_path_one_timestep, solver_keyword_arguments={"time_limit" : 1.7 ** (np.sqrt(nb_nodes)) / 10, "nb_threads" : 1}, verbose=0)
if algorithm_name == "B&B restricted long" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, Branch_and_Bound_arc_path_one_timestep, solver_keyword_arguments={"time_limit" : 1.7 ** (np.sqrt(nb_nodes)) / 2, "nb_threads" : 1}, verbose=0)
if algorithm_name == "Partial B&B restricted" : results_list = iterate_one_timestep_solver(instance_list, initial_path_list, Branch_and_Bound_arc_path_one_timestep, solver_keyword_arguments={"nb_new_binary_var" : 1 + nb_commodities // 10, "time_limit" : 1.7 ** (np.sqrt(nb_nodes)) / 40, "nb_threads" : 1}, verbose=0)
if algorithm_name == "SRR path-combination" : results_list = SRR_path_combinations(instance_list, initial_path_list)
if algorithm_name == "SRR path-combination no penalization" : results_list = SRR_path_combinations(instance_list, initial_path_list, flow_penalisation=0)
if algorithm_name == "SRR path-combination restricted" : results_list = SRR_path_combinations(instance_list, initial_path_list, exact_var_generation=False)
if algorithm_name == "SRR path-combination commodity" : results_list = SRR_path_combinations2(instance_list, initial_path_list)
if algorithm_name == "SRR path-combination timestep" : results_list = SRR_path_combinations2(instance_list, initial_path_list, rounding_method="round_by_timestep")
computing_time = time.time() - temp
total_path_changes, min_nb_of_path_changes, path_changes_ratio, total_overload, overload_ratio = analyse_results_list(instance_list, initial_path_list, results_list)
log_file = open(global_path + "/Dynamic_mcnf_paper_code/log_file.txt", 'a')
log_file.write("Finished : " + instance_file_path + ", " + print_string + "\n")
log_file.close()
print("Finished")
return_list.append((total_path_changes, min_nb_of_path_changes, path_changes_ratio, total_overload, overload_ratio, computing_time))
def analyse_results_list(instance_list, initial_path_list, results_list):
# Compute various metrics extracted from the solution of an algorithm
nb_timesteps = len(instance_list)
allowed_overflow = sum([commodity[2] for commodity in instance_list[0][1]]) * 0.01
old_path_list = initial_path_list
total_path_changes = total_overload = 0
min_nb_of_path_changes, _ = compute_mininmum_number_of_path_changes(instance_list, initial_path_list)
for instance, new_path_list in zip(instance_list, results_list):
nb_path_changes = 0
graph, commodity_list = instance
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(len(graph))]
for commodity, old_path, new_path in zip(commodity_list, old_path_list, new_path_list):
update_graph_capacity(use_graph, new_path, -commodity[2])
if old_path != new_path:
nb_path_changes += 1
overload_graph = [{neighbor : max(0, use_graph[node][neighbor] - graph[node][neighbor]) for neighbor in graph[node]} for node in range(len(graph))]
overload = sum([sum(dct.values()) for dct in overload_graph])
total_overload += max(0, overload - allowed_overflow)
total_path_changes += nb_path_changes
old_path_list = new_path_list
path_changes_ratio = total_path_changes / min_nb_of_path_changes
overload_ratio = total_overload / (allowed_overflow * nb_timesteps)
return total_path_changes, min_nb_of_path_changes, path_changes_ratio, total_overload, overload_ratio
if __name__ == "__main__":
# Set the path to the global directory
# global_path = "/home/disc/f.lamothe"
global_path = "/home/francois/Desktop"
# assert False, "Unassigned global_path : Complete global_path with the path to the main directory"
    # Set the number of repetitions
nb_repetitions = 1
nb_workers = 30
duration_before_timeout = 3*60*60
settings_list = []
settings_list.append(("graph_scaling_dataset_easy", ["SRR arc node", "SRR arc path", "SRR arc node no penalization", "SRR arc path no penalization", 'SRR restricted', 'SRR restricted multi-time-step', "B&B restricted short", "B&B restricted medium", "B&B restricted long", "SRR path-combination", "SRR path-combination no penalization", "SRR path-combination timestep", "SRR path-combination commodity", "SRR path-combination restricted"]))
settings_list.append(("graph_scaling_dataset_hard", ["SRR arc node", "SRR arc path", "SRR arc node no penalization", "SRR arc path no penalization", 'SRR restricted', 'SRR restricted multi-time-step', "B&B restricted short", "B&B restricted medium", "B&B restricted long", "SRR path-combination", "SRR path-combination no penalization", "SRR path-combination timestep", "SRR path-combination commodity", "SRR path-combination restricted"]))
settings_list.append(("graph_scaling_dataset_random", ["SRR arc node", "SRR arc path", "SRR arc node no penalization", "SRR arc path no penalization", 'SRR restricted', 'SRR restricted multi-time-step', "B&B restricted short", "B&B restricted medium", "SRR path-combination", "SRR path-combination no penalization", "SRR path-combination timestep", "SRR path-combination commodity", "SRR path-combination restricted"]))
settings_list.append(("commodity_scaling_dataset", ["SRR arc node", "SRR arc path", "SRR arc node no penalization", "SRR arc path no penalization", 'SRR restricted', 'SRR restricted multi-time-step', "B&B restricted short", "B&B restricted medium", "B&B restricted long", "SRR path-combination", "SRR path-combination no penalization", "SRR path-combination timestep", "SRR path-combination commodity", "SRR path-combination restricted"]))
for dataset_name, algorithm_list in settings_list:
launch_dataset(global_path, dataset_name, algorithm_list, nb_repetitions, nb_workers, duration_before_timeout)
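    # Illustrative helper (defined but not called here): read back the
    # per-dataset result file pickled by launch_dataset for offline analysis.
    def _read_results(dataset_name):
        result_path = (global_path + "/Dynamic_mcnf_paper_code/instance_files_dynamic/"
                       + dataset_name + "/result_file.p")
        with open(result_path, "rb") as result_file:
            return pickle.load(result_file)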
|
__init__.py
|
"""Hermes MQTT server for Rhasspy wakeword with pocketsphinx"""
import asyncio
import logging
import queue
import socket
import tempfile
import threading
import typing
from pathlib import Path
import pocketsphinx
from rhasspyhermes.audioserver import AudioFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.wake import (
HotwordDetected,
HotwordError,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
WAV_HEADER_BYTES = 44
_LOGGER = logging.getLogger("rhasspywake_pocketsphinx_hermes")
# -----------------------------------------------------------------------------
class WakeHermesMqtt(HermesClient):
"""Hermes MQTT server for Rhasspy wakeword with pocketsphinx."""
def __init__(
self,
client,
keyphrase: str,
acoustic_model: Path,
dictionary_paths: typing.List[Path],
wakeword_id: str = "",
keyphrase_threshold: float = 1e-40,
mllr_matrix: typing.Optional[Path] = None,
site_ids: typing.Optional[typing.List[str]] = None,
enabled: bool = True,
sample_rate: int = 16000,
sample_width: int = 2,
channels: int = 1,
chunk_size: int = 960,
udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,
udp_chunk_size: int = 2048,
debug: bool = False,
lang: typing.Optional[str] = None,
):
super().__init__(
"rhasspywake_pocketsphinx_hermes",
client,
sample_rate=sample_rate,
sample_width=sample_width,
channels=channels,
site_ids=site_ids,
)
self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff)
self.keyphrase = keyphrase
self.keyphrase_threshold = keyphrase_threshold
self.acoustic_model = acoustic_model
self.dictionary_paths = dictionary_paths
self.mllr_matrix = mllr_matrix
self.wakeword_id = wakeword_id
self.enabled = enabled
self.disabled_reasons: typing.Set[str] = set()
# Required audio format
self.sample_rate = sample_rate
self.sample_width = sample_width
self.channels = channels
self.chunk_sizes = dict()
# Queue of WAV audio chunks to process (plus site_id)
self.wav_queues = dict()
self.first_audios = dict()
self.audio_buffers = dict()
self.decoders = dict()
self.decoders_started = dict()
self.debug = debug
for site_id in self.site_ids:
self.audio_buffers[site_id] = bytes()
self.first_audios[site_id] = True
self.wav_queues[site_id] = queue.Queue()
            self.decoders[site_id] = None  # created lazily by load_decoder()
self.chunk_sizes[site_id] = chunk_size
self.decoders_started[site_id] = False
# Start threads
threading.Thread(target=self.detection_thread_proc, daemon=True, args=(site_id,)).start()
self.lang = lang
# Listen for raw audio on UDP too
self.udp_chunk_size = udp_chunk_size
if udp_audio:
for udp_host, udp_port, udp_site_id in udp_audio:
threading.Thread(
target=self.udp_thread_proc,
args=(udp_host, udp_port, udp_site_id),
daemon=True,
).start()
# -------------------------------------------------------------------------
def load_decoder(self, site_id):
"""Load Pocketsphinx decoder."""
_LOGGER.debug(
"Loading decoder with hmm=%s, dicts=%s",
str(self.acoustic_model),
self.dictionary_paths,
)
words_needed = set(self.keyphrase.split())
with tempfile.NamedTemporaryFile(mode="w+", suffix=".txt") as dict_file:
# Combine all dictionaries
for sub_dict_path in self.dictionary_paths:
if not sub_dict_path.is_file():
_LOGGER.warning("Skipping dictionary %s", str(sub_dict_path))
continue
with open(sub_dict_path, "r") as sub_dict_file:
for line in sub_dict_file:
line = line.strip()
if line:
word = line.split(maxsplit=2)[0]
if word in words_needed:
print(line, file=dict_file)
words_needed.remove(word)
assert (
len(words_needed) == 0
), f"Missing pronunciations for words: {words_needed}"
dict_file.seek(0)
decoder_config = pocketsphinx.Decoder.default_config()
decoder_config.set_string("-hmm", str(self.acoustic_model))
decoder_config.set_string("-dict", str(dict_file.name))
decoder_config.set_string("-keyphrase", self.keyphrase)
decoder_config.set_float("-kws_threshold", self.keyphrase_threshold)
if not self.debug:
decoder_config.set_string("-logfn", "/dev/null")
if self.mllr_matrix and self.mllr_matrix.is_file():
decoder_config.set_string("-mllr", str(self.mllr_matrix))
self.decoders[site_id] = pocketsphinx.Decoder(decoder_config)
# -------------------------------------------------------------------------
async def handle_audio_frame(self, wav_bytes: bytes, site_id: str = "default"):
"""Process a single audio frame"""
self.wav_queues[site_id].put((wav_bytes, site_id))
async def handle_detection(
self, wakeword_id: str, site_id: str = "default"
) -> typing.AsyncIterable[
typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]
]:
"""Handle a successful hotword detection"""
try:
yield (
HotwordDetected(
site_id=site_id,
model_id=self.keyphrase,
current_sensitivity=self.keyphrase_threshold,
model_version="",
model_type="personal",
lang=self.lang,
),
{"wakeword_id": wakeword_id},
)
except Exception as e:
_LOGGER.exception("handle_detection")
yield HotwordError(error=str(e), context=self.keyphrase, site_id=site_id)
def detection_thread_proc(self, site_id):
"""Handle WAV audio chunks."""
try:
while True:
wav_bytes, site_id = self.wav_queues[site_id].get()
if self.first_audios[site_id]:
_LOGGER.debug("Receiving audio %s", site_id)
self.first_audios[site_id] = False
if not self.decoders[site_id]:
self.load_decoder(site_id)
assert self.decoders[site_id] is not None
# Extract/convert audio data
audio_data = self.maybe_convert_wav(wav_bytes)
# Add to persistent buffer
self.audio_buffers[site_id] += audio_data
# Process in chunks.
# Any remaining audio data will be kept in buffer.
while len(self.audio_buffers[site_id]) >= self.chunk_sizes[site_id]:
chunk = self.audio_buffers[site_id][: self.chunk_sizes[site_id]]
self.audio_buffers[site_id] = self.audio_buffers[site_id][self.chunk_sizes[site_id] :]
if not self.decoders_started[site_id]:
# Begin utterance
self.decoders[site_id].start_utt()
self.decoders_started[site_id] = True
self.decoders[site_id].process_raw(chunk, False, False)
hyp = self.decoders[site_id].hyp()
if hyp:
if self.decoders_started[site_id]:
# End utterance
self.decoders[site_id].end_utt()
self.decoders_started[site_id] = False
wakeword_id = self.wakeword_id
if not wakeword_id:
wakeword_id = self.keyphrase
asyncio.run_coroutine_threadsafe(
self.publish_all(
self.handle_detection(wakeword_id, site_id=site_id)
),
self.loop,
)
# Stop and clear buffer to avoid duplicate reports
self.audio_buffers[site_id] = bytes()
break
except Exception:
_LOGGER.exception("detection_thread_proc")
# -------------------------------------------------------------------------
def udp_thread_proc(self, host: str, port: int, site_id: str):
"""Handle WAV chunks from UDP socket."""
try:
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.bind((host, port))
_LOGGER.debug("Listening for audio on UDP %s:%s", host, port)
while True:
wav_bytes, _ = udp_socket.recvfrom(
self.udp_chunk_size + WAV_HEADER_BYTES
)
if self.enabled:
self.wav_queues[site_id].put((wav_bytes, site_id))
except Exception:
_LOGGER.exception("udp_thread_proc")
# -------------------------------------------------------------------------
async def on_message_blocking(
self,
message: Message,
site_id: typing.Optional[str] = None,
session_id: typing.Optional[str] = None,
topic: typing.Optional[str] = None,
) -> GeneratorType:
"""Received message from MQTT broker."""
# Check enable/disable messages
if isinstance(message, HotwordToggleOn):
if message.reason == HotwordToggleReason.UNKNOWN:
# Always enable on unknown
self.disabled_reasons.clear()
else:
self.disabled_reasons.discard(message.reason)
if self.disabled_reasons:
_LOGGER.debug("Still disabled: %s", self.disabled_reasons)
else:
self.enabled = True
self.first_audios[site_id] = True
_LOGGER.debug("Enabled")
elif isinstance(message, HotwordToggleOff):
self.enabled = False
self.disabled_reasons.add(message.reason)
# End utterance
if self.decoders[site_id] and self.decoders_started[site_id]:
self.decoders[site_id].end_utt()
self.decoders_started[site_id] = False
_LOGGER.debug("Disabled")
elif isinstance(message, AudioFrame):
if self.enabled:
assert site_id, "Missing site_id"
await self.handle_audio_frame(message.wav_bytes, site_id=site_id)
else:
_LOGGER.warning("Unexpected message: %s", message)
# Mark as async generator
yield None
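# -----------------------------------------------------------------------------
# Illustrative only: one way this class might be wired up, assuming a
# paho-mqtt client and the stock pocketsphinx en-us model paths. Nothing in
# this sketch is required by, or part of, the module above.
def _example_usage():
    import paho.mqtt.client as mqtt

    mqtt_client = mqtt.Client()
    return WakeHermesMqtt(
        mqtt_client,
        keyphrase="hey computer",
        acoustic_model=Path("/usr/share/pocketsphinx/model/en-us/en-us"),
        dictionary_paths=[
            Path("/usr/share/pocketsphinx/model/en-us/cmudict-en-us.dict")
        ],
        site_ids=["default"],
    )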
|
tello-stream.py
|
#!/usr/bin/python3
"""
@brief A program for streaming video from the Tello camera.
@author Murtadha Bazli Tukimat
@date 17-Nov-2020
"""
import threading
import socket
import cv2
""" Welcome note """
print("\nTello Video Stream Program\n")
class Tello:
def __init__(self):
self._running = True
self.video = cv2.VideoCapture("udp://@0.0.0.0:11111")
def terminate(self):
self._running = False
self.video.release()
cv2.destroyAllWindows()
def recv(self):
""" Handler for Tello states message """
while self._running:
try:
ret, frame = self.video.read()
if ret:
# Resize frame
height, width, _ = frame.shape
new_h = int(height / 2)
new_w = int(width / 2)
# Resize for improved performance
new_frame = cv2.resize(frame, (new_w, new_h))
# Display the resulting frame
cv2.imshow('Tello', new_frame)
                    # Let HighGUI process events and draw the frame; a quit-key
                    # check (e.g. cv2.waitKey(1) & 0xFF == ord('q')) could go here
cv2.waitKey(1)
except Exception as err:
print(err)
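""" Enable the Tello video stream before capturing (illustrative sketch:
assumes the Tello's default AP-mode address; the SDK expects "command" then
"streamon" on UDP port 8889 before video appears on port 11111) """
cmd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cmd_sock.sendto(b"command", ("192.168.10.1", 8889))
cmd_sock.sendto(b"streamon", ("192.168.10.1", 8889))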
""" Start new thread for receive Tello response message """
t = Tello()
recvThread = threading.Thread(target=t.recv)
recvThread.start()
while True:
try:
# Get input from CLI
msg = input()
# Check for "end"
if msg == "bye":
t.terminate()
recvThread.join()
print("\nGood Bye\n")
break
except KeyboardInterrupt:
t.terminate()
recvThread.join()
break
|
finder.py
|
# Copyright (C) [2015-2017] [Thomson Reuters LLC]
# Copyright (C) [2015-2017] [Panos Kittenis]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graphite-Api storage finder for InfluxDB.
Read metric series from an InfluxDB database via a Graphite-API storage plugin
compatible API.
"""
from __future__ import absolute_import, print_function
import json
import threading
from multiprocessing import Lock as processLock
import time
import datetime
import logging
from logging.handlers import WatchedFileHandler
from collections import deque
from influxdb import InfluxDBClient
from graphite_api.node import BranchNode
from ..constants import _INFLUXDB_CLIENT_PARAMS, \
SERIES_LOADER_MUTEX_KEY, LOADER_LIMIT, MEMCACHE_SERIES_DEFAULT_TTL, \
DEFAULT_AGGREGATIONS, _MEMCACHE_FIELDS_KEY, FILL_PARAMS, FILE_LOCK
from ..utils import calculate_interval, \
get_aggregation_func, gen_memcache_key, gen_memcache_pattern_key, \
get_retention_policy, _compile_aggregation_patterns, \
make_memcache_client
from ..templates import parse_influxdb_graphite_templates, apply_template, \
TemplateMatchError
try:
from ..ext.templates import parse_series, read_influxdb_values
except ImportError:
from ..utils import parse_series, read_influxdb_values
from .reader import InfluxDBReader
from .leaf import InfluxDBLeafNode
from .tree import NodeTreeIndex
from .lock import FileLock
_SERIES_LOADER_LOCK = processLock()
logger = logging.getLogger('influxgraph')
class InfluxDBFinder(object):
"""Graphite-Api finder for InfluxDB.
Finds and fetches metric series from InfluxDB.
"""
__fetch_multi__ = 'influxdb'
__slots__ = ('client', 'aggregation_functions',
'memcache', 'memcache_host', 'memcache_ttl',
'memcache_series_loader_mutex_key', 'memcache_fields_key',
'deltas', 'retention_policies', 'index', 'reader',
'index_lock', 'index_path', 'graphite_templates',
'loader_limit', 'fill_param')
def __init__(self, config):
influxdb_config = config.get('influxdb', {})
self.client = InfluxDBClient(influxdb_config.get('host', 'localhost'),
influxdb_config.get('port', '8086'),
influxdb_config.get('user', 'root'),
influxdb_config.get('pass', 'root'),
influxdb_config.get('db', 'graphite'),
influxdb_config.get('ssl', 'false'),)
self._setup_logger(influxdb_config.get('log_level', 'info'),
influxdb_config.get('log_file', None))
memcache_conf = influxdb_config.get('memcache', {})
memcache_host = memcache_conf.get('host')
self.memcache_ttl = memcache_conf.get(
'ttl', MEMCACHE_SERIES_DEFAULT_TTL)
self.memcache_series_loader_mutex_key = memcache_conf.get(
'series_loader_mutex_key', SERIES_LOADER_MUTEX_KEY)
self.memcache_fields_key = memcache_conf.get(
'fields_key', _MEMCACHE_FIELDS_KEY)
self.memcache = make_memcache_client(
memcache_host, memcache_max_value=memcache_conf.get('max_value', 1))
self.aggregation_functions = _compile_aggregation_patterns(
influxdb_config.get('aggregation_functions', DEFAULT_AGGREGATIONS))
self.fill_param = influxdb_config.get('fill', 'null')
if self.fill_param not in FILL_PARAMS and not (
isinstance(self.fill_param, int) or isinstance(
self.fill_param, float)):
raise Exception("Configured fill param %s is not a valid parameter "
"nor integer or float number", self.fill_param,)
series_loader_interval = influxdb_config.get(
'series_loader_interval', 900)
reindex_interval = influxdb_config.get('reindex_interval', 900)
self.loader_limit = influxdb_config.get('loader_limit', LOADER_LIMIT)
if not isinstance(self.loader_limit, int):
raise Exception("Configured loader limit %s is not an integer",
self.loader_limit)
self.deltas = influxdb_config.get('deltas', None)
self.retention_policies = influxdb_config.get(
'retention_policies', None)
logger.debug("Configured aggregation functions - %s",
self.aggregation_functions,)
templates = influxdb_config.get('templates')
self.graphite_templates = parse_influxdb_graphite_templates(templates) \
if templates else None
loader_startup_block = influxdb_config.get('loader_startup_block', True)
self._start_loader(series_loader_interval, loader_startup_block)
self.index = None
self.index_path = config.get('search_index')
self.index_lock = FileLock(influxdb_config.get('index_lock_file',
FILE_LOCK))
self.reader = InfluxDBReader(
self.client, None,
aggregation_functions=self.aggregation_functions,
memcache=self.memcache,
deltas=self.deltas)
self._start_reindexer(reindex_interval)
def _start_loader(self, series_loader_interval, loader_startup_block):
# No memcached configured? Cannot use series loader
if not self.memcache:
return
# Run series loader in main thread if due to run to not allow
# requests to be served before series loader has completed at
# least once.
if loader_startup_block and _SERIES_LOADER_LOCK.acquire(block=False):
try:
if self.memcache.get(self.memcache_series_loader_mutex_key):
logger.debug("Series loader mutex exists %s - "
"skipping series load",
self.memcache_series_loader_mutex_key)
else:
logger.info(
"Starting initial series list load - this may "
"take several minutes on databases with a large "
"number of series..")
self.memcache.set(self.memcache_series_loader_mutex_key, 1,
time=series_loader_interval)
try:
if self.graphite_templates:
self.get_field_keys()
for _ in self.get_all_series_list():
pass
except Exception as ex:
logger.error(
"Error calling InfluxDB from initial series "
"and field list load - %s", ex)
finally:
_SERIES_LOADER_LOCK.release()
else:
logger.warning(
"Configured to not block at startup while loading index. "
"No data will be returned until index is built for the "
"first time.")
loader = threading.Thread(target=self._series_loader,
kwargs={'interval': series_loader_interval})
loader.daemon = True
loader.start()
def _start_reindexer(self, reindex_interval):
new_index = False
if not self.index:
self.load_index()
if not self.index:
self.build_index()
new_index = True
logger.debug("Starting reindexer thread with interval %s",
reindex_interval)
reindexer = threading.Thread(target=self._reindex,
kwargs={'interval': reindex_interval,
'new_index': new_index})
reindexer.daemon = True
reindexer.start()
def _setup_logger(self, level, log_file):
"""Setup log level and log file if set"""
if not level:
return
if logger.handlers:
return
if hasattr(logging, 'NullHandler'):
logger.addHandler(logging.NullHandler())
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() '
'- %(message)s')
level = getattr(logging, level.upper())
logger.setLevel(level)
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
return
try:
_handler = WatchedFileHandler(log_file)
except IOError:
logger.error("Could not write to %s, falling back to stdout",
log_file)
else:
logger.addHandler(_handler)
_handler.setFormatter(formatter)
def get_series(self, cache=True, offset=0):
"""Retrieve series names from InfluxDB according to query pattern
:param query: Query to run to get series names
:type query: :mod:`graphite_api.storage.FindQuery` compatible class
"""
memcache_key = gen_memcache_pattern_key("_".join([
'*', str(self.loader_limit), str(offset)]))
cached_series = self.memcache.get(memcache_key) \
if self.memcache and cache else None
if cached_series is not None:
logger.debug("Found cached series for limit %s, "
"offset %s", self.loader_limit, offset)
return cached_series
series = self._get_series(offset=offset)
if self.memcache:
self.memcache.set(memcache_key, series, time=self.memcache_ttl,
min_compress_len=50)
return series
def _get_series(self, offset=0):
memcache_key = gen_memcache_pattern_key("_".join([
'*', str(self.loader_limit), str(offset)]))
_query = "SHOW SERIES LIMIT %s OFFSET %s" % (self.loader_limit, offset,)
logger.debug("Series loader calling influxdb with query - %s", _query)
data = self.client.query(_query, params=_INFLUXDB_CLIENT_PARAMS)
series = [d.get('key') for k in data for d in k if d]
if self.memcache:
self.memcache.set(memcache_key, series, time=self.memcache_ttl,
min_compress_len=50)
return series
def _store_last_offset(self, query_pattern, limit, offset):
if offset and self.memcache:
# Store empty list at offset+last limit to indicate
# that this is the last page
last_offset = offset + limit
logger.debug("Pagination finished for query pattern %s "
"- storing empty array for limit %s and "
"last offset %s",
query_pattern, limit, offset,)
memcache_key = gen_memcache_pattern_key("_".join([
query_pattern, str(limit), str(last_offset)]))
self.memcache.set(memcache_key, [], time=self.memcache_ttl)
def get_all_series(self, cache=True,
offset=0, _data=None, **kwargs):
"""Retrieve all series"""
# pylint: disable=unused-argument
data = self.get_series(
cache=cache, offset=offset)
return self._pagination_runner(data, '*', self.get_all_series,
limit=self.loader_limit,
cache=cache,
offset=offset)
def get_all_series_list(self, offset=0, _data=None,
*args, **kwargs):
"""Retrieve all series for series loader"""
# pylint: disable=unused-argument
query_pattern = '*'
data = self._get_series(offset=offset)
return self._pagination_runner(
data, query_pattern, self.get_all_series_list,
limit=self.loader_limit, offset=offset)
def _pagination_runner(self, data, query_pattern, get_series_func,
limit=None, offset=None, _data=None,
*args, **kwargs):
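        # Walk SHOW SERIES pages recursively: a full page (len(data) == limit)
        # triggers a fetch at the next offset; a short or empty page ends the
        # walk and caches an empty-list sentinel at the final offset.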
if not _data:
_data = []
if data:
if len(data) < limit:
self._store_last_offset(query_pattern, limit, offset)
return _data + data
offset = limit + offset
return data + get_series_func(
*args, limit=limit, offset=offset,
_data=_data, **kwargs)
self._store_last_offset(query_pattern, limit, offset)
return data
def _series_loader(self, interval=900):
"""Loads influxdb series list into memcache at a rate of no
more than once per interval
"""
logger.info("Starting background series loader with interval %s",
interval)
while True:
time.sleep(interval)
if _SERIES_LOADER_LOCK.acquire(block=False):
_SERIES_LOADER_LOCK.release()
if self.memcache.get(self.memcache_series_loader_mutex_key):
logger.debug("Series loader mutex exists %s - "
"skipping series load",
self.memcache_series_loader_mutex_key)
time.sleep(interval)
continue
self.memcache.set(self.memcache_series_loader_mutex_key, 1,
time=interval)
start_time = datetime.datetime.now()
logger.debug("Starting series list loader..")
_SERIES_LOADER_LOCK.acquire()
try:
if self.graphite_templates:
self.get_field_keys()
for _ in self.get_all_series_list():
pass
except Exception as ex:
logger.error("Error calling InfluxDB from series loader - %s",
ex,)
time.sleep(interval)
continue
finally:
_SERIES_LOADER_LOCK.release()
dt = datetime.datetime.now() - start_time
logger.debug("Series list loader finished in %s", dt)
def find_nodes(self, query):
"""Find and return nodes matching query
:param query: Query to search for
:type query: :mod:`influxgraph.utils.Query`
"""
node_paths = self.index.query(query.pattern)
for path, node in node_paths:
if node.is_leaf():
# Set path on existing reader to avoid having to create
# new objects for each path which is expensive
# Reader is not used for queries when multi fetch is enabled
# regardless
self.reader.path = path
yield InfluxDBLeafNode(path, self.reader)
else:
yield BranchNode(path)
def _gen_aggregation_func(self, paths):
aggregation_funcs = list(set(get_aggregation_func(
path, self.aggregation_functions) for path in paths))
if len(aggregation_funcs) > 1:
logger.warning("Got multiple aggregation functions %s for paths %s "
"- Using '%s'",
aggregation_funcs, paths, aggregation_funcs[0])
aggregation_func = aggregation_funcs[0]
return aggregation_func
def _get_template_values_from_paths(self, paths, _filter, template,
default_tags, separator,
measurement_data):
_measurements = deque()
_tags = {}
_fields = deque()
matched_paths = deque()
for path in paths:
if _filter and not _filter.match(path):
continue
try:
measurement, tags, field = apply_template(
path.split('.'), template, default_tags, separator)
except TemplateMatchError:
continue
if measurement not in _measurements:
_measurements.append(measurement)
for tag in tags:
if tag not in _tags or tags[tag] not in _tags[tag]:
_tags.setdefault(tag, []).append(tags[tag])
if not field:
field = 'value'
if field not in _fields:
_fields.append(field)
matched_paths.append(path)
measurement_data.setdefault(measurement, {}).setdefault(
'paths', []).append(path)
if field not in measurement_data[measurement].setdefault(
'fields', []):
measurement_data[measurement].setdefault(
'fields', []).append(field)
measurement_data[measurement].setdefault(
'template', template)
return _measurements, _tags, _fields, matched_paths
def _get_all_template_values(self, paths):
paths = paths[:]
measurement_data = {}
measurements, tags, fields = deque(), deque(), set()
for (_filter, template,
default_tags, separator) in self.graphite_templates:
# One influx measurement queried per template
if not paths:
break
_measurements, _tags, \
_fields, matched_paths = self._get_template_values_from_paths(
paths, _filter, template, default_tags, separator,
measurement_data)
if _measurements:
# Found template match for path, append query data and
# remove matched paths so we do not try to match them again
measurements.extend(_measurements)
if _tags:
tags.append(_tags)
fields = fields.union(_fields)
for path in matched_paths:
del paths[paths.index(path)]
return measurements, tags, fields, measurement_data
def _gen_query(self, measurements, tags, fields, retention):
groupings = set([k for t in tags for k in t.keys()])
measurements = ', '.join(
('"%s"."%s"' % (retention, measure,) for measure in measurements)) \
if retention \
else ', '.join(('"%s"' % (measure,) for measure in measurements))
_tags = ' OR '.join(['(%s)' % (tag_set,) for tag_set in [
' AND '.join(['(%s)' % ' OR '.join([
""""%s" = '%s'""" % (tag, tag_val,)
for tag_val in __tags[tag]])
for tag in __tags])
for __tags in tags]]) if tags else None
fields = fields if fields else ['value']
return measurements, _tags, fields, groupings
def _gen_query_values_from_templates(self, paths, retention):
measurements, tags, fields, measurement_data = \
self._get_all_template_values(paths)
measurements, tags, fields, groupings = self._gen_query(
measurements, tags, fields, retention)
return measurements, tags, fields, groupings, measurement_data
def _gen_query_values(self, paths, retention):
if self.graphite_templates:
return self._gen_query_values_from_templates(paths, retention)
measurement = ', '.join(('"%s"."%s"' % (retention, path,)
for path in paths)) if retention \
else ', '.join(('"%s"' % (path,)
for path in paths))
return measurement, None, ['value'], None, None
def _gen_infl_stmt(self, measurements, tags, fields, groupings, start_time,
end_time, aggregation_func, interval):
time_clause = "(time > %ds and time <= %ds)" % (start_time, end_time,)
query_fields = ', '.join(['%s("%s") as "%s"' % (
aggregation_func, field, field) for field in fields])
groupings = ['"%s"' % (grouping,) for grouping in groupings] \
if groupings else []
groupings.insert(0, 'time(%ss)' % (interval,))
groupings = ', '.join(groupings)
where_clause = "%s AND %s" % (time_clause, tags,) if tags else \
time_clause
group_by = '%s fill(%s)' % (groupings, self.fill_param,)
query = 'select %s from %s where %s GROUP BY %s' % (
query_fields, measurements, where_clause, group_by,)
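        # Illustrative shape only: for fields=['value'], one measurement "cpu",
        # one host tag and a 60s interval, this produces roughly
        #   select mean("value") as "value" from "cpu" where
        #   (time > 1500000000s and time <= 1500003600s) AND (("host" = 'a'))
        #   GROUP BY time(60s) fill(null)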
return query
def _gen_influxdb_stmt(self, start_time, end_time, paths, interval,
aggregation_func):
retention = get_retention_policy(interval, self.retention_policies) \
if self.retention_policies else None
measurements, tags, fields, \
groupings, measurement_data = self._gen_query_values(
paths, retention)
query = self._gen_infl_stmt(measurements, tags, fields, groupings,
start_time, end_time, aggregation_func,
interval)
return query, measurement_data
def _make_empty_multi_fetch_result(self, time_info, paths):
data = {}
for key in paths:
data[key] = []
return time_info, data
def fetch_multi(self, nodes, start_time, end_time):
"""Fetch datapoints for all series between start and end times
:param nodes: List of nodes to retrieve data for
:type nodes: list(:mod:`influxgraph.classes.InfluxDBLeafNode`)
:param start_time: Start time of query
:param end_time: End time of query
"""
interval = calculate_interval(start_time, end_time, deltas=self.deltas)
time_info = start_time, end_time, interval
if not nodes:
return time_info, {}
paths = sorted([n.path for n in nodes if n.is_leaf])
        if not paths:
return self._make_empty_multi_fetch_result(
time_info, [n.path for n in nodes])
aggregation_func = self._gen_aggregation_func(paths)
memcache_key = gen_memcache_key(start_time, end_time, aggregation_func,
paths)
data = self.memcache.get(memcache_key) if self.memcache else None
if data:
logger.debug("Found cached data for key %s", memcache_key)
return time_info, data
logger.debug('fetch_multi() - start_time: %s - '
'end_time: %s, interval %s',
datetime.datetime.fromtimestamp(float(start_time)),
datetime.datetime.fromtimestamp(float(end_time)), interval)
try:
query, measurement_data = self._gen_influxdb_stmt(
start_time, end_time, paths, interval, aggregation_func)
except TypeError as ex:
logger.error("Type error generating query statement - %s", ex)
return self._make_empty_multi_fetch_result(time_info, paths)
data = self._run_infl_query(query, paths, measurement_data)
# Do not cache empty responses
if self.memcache and sum([len(vals) for vals in data.values()]) > 0:
self.memcache.set(memcache_key, data,
time=interval,
min_compress_len=50)
return time_info, data
def _run_infl_query(self, query, paths, measurement_data):
logger.debug("Calling influxdb multi fetch with query - %s", query)
data = self.client.query(query, params=_INFLUXDB_CLIENT_PARAMS)
logger.debug('fetch_multi() - Retrieved %d result set(s)', len(data))
data = read_influxdb_values(data, paths, measurement_data)
# Graphite API requires that data contain keys for
# all requested paths even if they have no datapoints
for key in paths:
data.setdefault(key, [])
return data
    def _read_static_data(self, data_file):
        with open(data_file) as data_fh:
            data = json.load(data_fh)['results'][0]['series'][0]['values']
        return [d for k in data for d in k if d]
def _reindex(self, new_index=False, interval=900):
"""Perform re-index"""
save_thread = threading.Thread(target=self.save_index)
if new_index:
save_thread.start()
del new_index
while True:
time.sleep(interval)
try:
save_thread.join()
except RuntimeError:
pass
finally:
del save_thread
try:
self.build_index()
except Exception as ex:
logger.error("Error occured in reindexing thread - %s", ex)
save_thread = threading.Thread(target=self.save_index)
save_thread.start()
def build_index(self, data=None, separator=b'.'):
"""Build new node tree index
:param data: (Optional) data to use to build index
:type data: list
"""
logger.info('Starting index build')
try:
data = self.get_all_series() if not data else data
except Exception as ex:
logger.error("Error getting series list from InfluxDB - %s -"
"Retrying after 30sec..", ex)
time.sleep(30)
return self.build_index()
all_fields = self.get_field_keys() if self.graphite_templates \
else None
with self.index_lock:
logger.info("Building index..")
start_time = datetime.datetime.now()
index = parse_series(data, all_fields, self.graphite_templates,
separator=separator)
self.index = index
logger.info("Finished building index in %s",
datetime.datetime.now() - start_time)
def _save_index_file(self, file_h):
"""Dump tree contents to file handle"""
if self.index:
json.dump(self.index.to_array(), file_h)
def save_index(self):
"""Save index to file"""
if not self.index_path:
return
if not (hasattr(self, 'index') and self.index
and hasattr(self.index, 'to_array')):
return
logger.info("Saving index to file %s", self.index_path,)
start_time = datetime.datetime.now()
try:
index_fh = open(self.index_path, 'wt')
self._save_index_file(index_fh)
except IOError as ex:
logger.error("Error writing to index file %s - %s",
self.index_path, ex)
return
except Exception as ex:
logger.error("Error saving index file %s - %s",
self.index_path, ex)
raise
else:
index_fh.close()
dt = datetime.datetime.now() - start_time
logger.info("Wrote index file to %s in %s", self.index_path, dt)
def load_index(self):
"""Load index from file"""
if not self.index_path:
return
if not (self.index and hasattr(self.index, 'from_file')):
return
logger.info("Loading index from file %s", self.index_path,)
try:
index_fh = open(self.index_path, 'rt')
except Exception as ex:
logger.error("Error reading index file %s - %s",
self.index_path, ex)
return
try:
index = NodeTreeIndex.from_file(index_fh)
except Exception as ex:
logger.error("Error loading index file - %s", ex)
return
finally:
index_fh.close()
self.index = index
logger.info("Loaded index from disk")
def get_field_keys(self):
"""Get field keys for all measurements"""
field_keys = self.memcache.get(self.memcache_fields_key) \
if self.memcache else None
if field_keys:
logger.debug("Found cached field keys")
return field_keys
logger.debug("Calling InfluxDB for field keys")
data = self.client.query('SHOW FIELD KEYS')
field_keys = {}
for ((key, _), vals) in data.items():
field_keys[key] = [val['fieldKey'] for val in vals]
if self.memcache:
if not self.memcache.set(self.memcache_fields_key, field_keys,
time=self.memcache_ttl,
min_compress_len=1):
logger.error("Could not add field key list to memcache - "
"likely field list size over max memcache value")
return field_keys
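# Illustrative only: a minimal `config` mapping accepted by
# InfluxDBFinder.__init__ above. Key names mirror the .get() calls in this
# module; the values are placeholders, not required settings.
def _example_finder_config():
    return {
        'influxdb': {
            'host': 'localhost',
            'port': 8086,
            'user': 'root',
            'pass': 'root',
            'db': 'graphite',
            'memcache': {'host': 'localhost'},
        },
        'search_index': '/tmp/influxgraph_index.json',
    }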
|
dataengine_reconfigure_spark.py
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import logging
import multiprocessing
import os
import sys
import traceback
from datalab.actions_lib import *
from datalab.fab import *
from datalab.meta_lib import *
from fabric import *
import subprocess
def install_libs_on_slaves(slave, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave + 1)
data_engine['slave_ip'] = get_instance_private_ip_address(
data_engine['tag_name'], slave_name)
params = "--os_user {} --instance_ip {} --keyfile '{}' --resource_type dataengine " \
.format(data_engine['os_user'], data_engine['slave_ip'], data_engine['keyfile'])
try:
# Run script to install additional libs
subprocess.run("~/scripts/{}.py {}".format('reconfigure_spark', params), shell=True, check=True)
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
if __name__ == "__main__":
instance_class = 'notebook'
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
logging.info('[RECONFIGURING SPARK ON DATAENGINE]')
print('[RECONFIGURING SPARK ON DATAENGINE]')
data_engine = dict()
try:
data_engine['os_user'] = os.environ['conf_os_user']
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['master_ip'] = get_instance_private_ip_address(
data_engine['tag_name'], data_engine['master_node_name'])
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
data_engine['instance_count'] = int(node_count(data_engine['cluster_name']))
data_engine['notebook_name'] = os.environ['notebook_instance_name']
data_engine['notebook_ip'] = get_instance_private_ip_address(
data_engine['tag_name'], data_engine['notebook_name'])
except Exception as err:
append_result("Failed to get parameter.", str(err))
sys.exit(1)
params = "--os_user {} --instance_ip {} --keyfile '{}' --resource_type dataengine " \
.format(data_engine['os_user'], data_engine['master_ip'], data_engine['keyfile'])
try:
# Run script to install additional libs
subprocess.run("~/scripts/{}.py {}".format('reconfigure_spark', params), shell=True, check=True)
        except Exception:
            traceback.print_exc()
            raise
try:
jobs = []
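            # instance_count includes the master, hence instance_count - 1
            # slave nodes, named <cluster_name>-s1 .. -sN (see
            # install_libs_on_slaves above).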
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=install_libs_on_slaves, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
        except Exception:
            traceback.print_exc()
            raise
params = "--os_user {} --instance_ip {} --keyfile '{}' --resource_type notebook --spark_type dataengine " \
"--cluster_name {}".format(data_engine['os_user'], data_engine['notebook_ip'], data_engine['keyfile'],
data_engine['cluster_name'])
try:
# Run script to get available libs
subprocess.run("~/scripts/{}.py {}".format('reconfigure_spark', params), shell=True, check=True)
        except Exception:
            traceback.print_exc()
            raise
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to reconfigure Spark.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"service_base_name": data_engine['service_base_name'],
"Action": "Reconfigure Spark on Data Engine"}
print(json.dumps(res))
result.write(json.dumps(res))
    except Exception:
print("Failed writing results.")
sys.exit(0)
|
b.py
|
# Dancing Drone - Crazyflie dancing to a music beat
# Cnaan Aviv 2013-10-05
import time, sys
import usb
from threading import Thread
import logging
import cflib
from cflib.crazyflie import Crazyflie
from cfclient.utils.logconfigreader import LogConfig
from cfclient.utils.logconfigreader import LogVariable
logging.basicConfig(level=logging.INFO)
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class Main:
def __init__(self):
self.thrust = 25000
self.pitch = -4
self.roll = 1
self.yaw = 0
self.stopping = False
self.jump = 0
self.backward = 0
self.forward = 0
Thread(target=self.gui).start()
self.crazyflie = Crazyflie()
cflib.crtp.init_drivers()
# You may need to update this value if your Crazyradio uses a different frequency.
#self.crazyflie.open_link("radio://0/7/250K")
self.crazyflie.open_link("radio://0/10/250K")
#self.crazyflie.open_link("radio://0/6/1M")
self.crazyflie.connectSetupFinished.add_callback(self.connectSetupFinished)
def connectSetupFinished(self, linkURI):
        # Start a separate thread to stream the flight setpoints.
        # Do not hijack the calling thread!
Thread(target=self.pulse_command).start()
def gui(self):
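        """Read single-character commands from stdin:
        r/f thrust +/-1000, e/q yaw +/-1, a/d roll +/-2, w/s pitch +/-2,
        y/h forward/backward burst, z jump, 3/4 thrust presets, x stop.
        """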
print "bingo"
while self.stopping==False:
#nb = _GetchUnix()
nb = sys.stdin.read(1)
if nb=='x':
self.stopping = True
if nb=='r':
self.thrust = self.thrust + 1000
if nb=='f':
self.thrust = self.thrust - 1000
if nb=='y':
self.backward = 0
self.forward = 3
if nb=='h':
self.backward = 3
self.forward = 0
if nb=='3':
self.thrust = 35000
if nb=='4':
self.thrust = 39000
if nb=='e':
self.yaw = self.yaw + 1
if nb=='q':
self.yaw = self.yaw - 1
if nb=='d':
self.roll = self.roll + 2
if nb=='a':
self.roll = self.roll - 2
if nb=='w':
self.pitch = self.pitch - 2
if nb=='s':
self.pitch = self.pitch + 2
if nb=='z':
self.jump = 2
sys.stdout.write("thrust=")
print self.thrust
sys.stdout.write("yaw=")
print self.yaw
sys.stdout.write("pitch=")
print self.pitch
sys.stdout.write("roll=")
print self.roll
def pulse_command(self):
        while not self.stopping:
lthrust = self.thrust
lpitch = self.pitch
if self.jump > 0:
lthrust = self.thrust + 25000
if self.forward > 0:
lpitch = self.pitch + 4
if self.backward > 0:
lpitch = self.pitch - 4
self.crazyflie.commander.send_setpoint(self.roll, lpitch, self.yaw, lthrust)
time.sleep(0.15)
            self.jump = self.jump - 1
            self.forward = self.forward - 1
            self.backward = self.backward - 1
self.crazyflie.commander.send_setpoint(0,0,0,0)
time.sleep(0.1)
self.crazyflie.close_link()
Main()
|
test_interactions_aiohttp.py
|
import asyncio
import logging
import time
import unittest
from random import randint
from threading import Thread
from aiohttp import WSMessage
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.async_client import AsyncBaseSocketModeClient
from slack_sdk.socket_mode.aiohttp import SocketModeClient
from slack_sdk.web.async_client import AsyncWebClient
from tests.slack_sdk.socket_mode.mock_socket_mode_server import (
start_socket_mode_server,
socket_mode_envelopes,
socket_mode_hello_message,
)
from tests.slack_sdk.socket_mode.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.slack_sdk_async.helpers import async_test
class TestInteractionsAiohttp(unittest.TestCase):
logger = logging.getLogger(__name__)
def setUp(self):
setup_mock_web_api_server(self)
self.web_client = AsyncWebClient(
token="xoxb-api_test",
base_url="http://localhost:8888",
)
def tearDown(self):
cleanup_mock_web_api_server(self)
@async_test
async def test_interactions(self):
t = Thread(target=start_socket_mode_server(self, 3001))
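        # Note: start_socket_mode_server() presumably returns the server's
        # run callable, so invoking it here to build the Thread target is
        # intentional rather than a missed args= bug.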
t.daemon = True
t.start()
received_messages = []
received_socket_mode_requests = []
async def message_handler(message: WSMessage):
self.logger.info(f"Raw Message: {message}")
await asyncio.sleep(randint(50, 200) / 1000)
received_messages.append(message.data)
async def socket_mode_listener(
self: AsyncBaseSocketModeClient,
request: SocketModeRequest,
):
self.logger.info(f"Socket Mode Request: {request.payload}")
await asyncio.sleep(randint(50, 200) / 1000)
received_socket_mode_requests.append(request.payload)
client = SocketModeClient(
app_token="xapp-A111-222-xyz",
web_client=self.web_client,
on_message_listeners=[message_handler],
auto_reconnect_enabled=False,
)
client.socket_mode_request_listeners.append(socket_mode_listener)
try:
time.sleep(1) # wait for the server
client.wss_uri = "ws://0.0.0.0:3001/link"
await client.connect()
await asyncio.sleep(1) # wait for the message receiver
for _ in range(10):
await client.send_message("foo")
await client.send_message("bar")
await client.send_message("baz")
expected = (
socket_mode_envelopes
+ [socket_mode_hello_message]
+ ["foo", "bar", "baz"] * 10
)
            expected.sort()
count = 0
while count < 10 and len(received_messages) < len(expected):
await asyncio.sleep(0.2)
count += 0.2
received_messages.sort()
self.assertEqual(received_messages, expected)
self.assertEqual(
len(socket_mode_envelopes), len(received_socket_mode_requests)
)
finally:
await client.close()
self.server.stop()
self.server.close()
|
testing.py
|
#############################################################################
#
# Copyright (c) 2004-2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Various test-support utility functions
"""
try:
# Python 3
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import urlopen
except ImportError:
# Python 2
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urllib2 import urlopen
import errno
import logging
import multiprocessing
import os
import pkg_resources
import random
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import zc.buildout.buildout
import zc.buildout.easy_install
from zc.buildout.rmtree import rmtree
print_ = zc.buildout.buildout.print_
fsync = getattr(os, 'fsync', lambda fileno: None)
is_win32 = sys.platform == 'win32'
def read(path='out', *rest):
with open(os.path.join(path, *rest)) as f:
return f.read()
def cat(dir, *names):
path = os.path.join(dir, *names)
if (not os.path.exists(path)
and is_win32
and os.path.exists(path+'-script.py')
):
path = path+'-script.py'
with open(path) as f:
print_(f.read(), end='')
def eqs(a, *b):
a = set(a); b = set(b)
return None if a == b else (a - b, b - a)
def clear_here():
for name in os.listdir('.'):
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
else:
shutil.rmtree(name)
def ls(dir, *subs):
if subs:
dir = os.path.join(dir, *subs)
names = sorted(os.listdir(dir))
for name in names:
# If we're running under coverage, elide coverage files
if os.getenv("COVERAGE_PROCESS_START") and name.startswith('.coverage.'):
continue
if os.path.isdir(os.path.join(dir, name)):
print_('d ', end=' ')
elif os.path.islink(os.path.join(dir, name)):
print_('l ', end=' ')
else:
print_('- ', end=' ')
print_(name)
def mkdir(*path):
os.mkdir(os.path.join(*path))
def remove(*path):
path = os.path.join(*path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def rmdir(*path):
shutil.rmtree(os.path.join(*path))
def write(dir, *args):
    path = os.path.join(dir, *(args[:-1]))
    with open(path, 'w') as f:
        f.write(args[-1])
        f.flush()
        fsync(f.fileno())
def clean_up_pyc(*path):
base, filename = os.path.join(*path[:-1]), path[-1]
if filename.endswith('.py'):
filename += 'c' # .py -> .pyc
for path in (
os.path.join(base, filename),
os.path.join(base, '__pycache__'),
):
if os.path.isdir(path):
rmdir(path)
elif os.path.exists(path):
remove(path)
## FIXME - check for other platforms
MUST_CLOSE_FDS = not sys.platform.startswith('win')
def system(command, input='', with_exit_code=False, env=None):
# Some TERMinals, especially xterm and its variants, add invisible control
# characters, which we do not want as they mess up doctests. See:
# https://github.com/buildout/buildout/pull/311
# http://bugs.python.org/issue19884
sub_env = dict(os.environ, TERM='dumb')
if env is not None:
sub_env.update(env)
p = subprocess.Popen(command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=MUST_CLOSE_FDS,
env=sub_env)
i, o, e = (p.stdin, p.stdout, p.stderr)
if input:
i.write(input.encode())
i.close()
result = o.read() + e.read()
o.close()
e.close()
output = result.decode()
if with_exit_code:
# Use the with_exit_code=True parameter when you want to test the exit
# code of the command you're running.
output += 'EXIT CODE: %s' % p.wait()
p.wait()
return output
def get(url):
return str(urlopen(url).read().decode())
def _runsetup(setup, *args):
if os.path.isdir(setup):
setup = os.path.join(setup, 'setup.py')
args = list(args)
args.insert(0, '-q')
here = os.getcwd()
try:
os.chdir(os.path.dirname(setup))
zc.buildout.easy_install.call_subprocess(
[sys.executable, setup] + args,
env=dict(os.environ,
PYTHONPATH=zc.buildout.easy_install.pip_pythonpath,
),
)
if os.path.exists('build'):
rmtree('build')
finally:
os.chdir(here)
def sdist(setup, dest):
_runsetup(setup, 'sdist', '-d', dest, '--formats=zip')
def bdist_egg(setup, executable, dest=None):
# Backward compat:
if dest is None:
dest = executable
else:
assert executable == sys.executable, (executable, sys.executable)
_runsetup(setup, 'bdist_egg', '-d', dest)
def wait_until(label, func, *args, **kw):
if 'timeout' in kw:
kw = dict(kw)
timeout = kw.pop('timeout')
else:
timeout = 30
deadline = time.time()+timeout
while time.time() < deadline:
if func(*args, **kw):
return
time.sleep(0.01)
raise ValueError('Timed out waiting for: '+label)
class TestOptions(zc.buildout.buildout.Options):
def __init__(self, *args):
zc.buildout.buildout.Options.__init__(self, *args)
self._created = []
def initialize(self):
pass
class Buildout(zc.buildout.buildout.Buildout):
def __init__(self):
for name in 'eggs', 'parts':
if not os.path.exists(name):
os.mkdir(name)
zc.buildout.buildout.Buildout.__init__(
self, '', [('buildout', 'directory', os.getcwd())], False)
Options = TestOptions
def buildoutSetUp(test):
test.globs['__tear_downs'] = __tear_downs = []
test.globs['register_teardown'] = register_teardown = __tear_downs.append
prefer_final = zc.buildout.easy_install.prefer_final()
register_teardown(
lambda: zc.buildout.easy_install.prefer_final(prefer_final)
)
here = os.getcwd()
register_teardown(lambda: os.chdir(here))
handlers_before_set_up = logging.getLogger().handlers[:]
def restore_root_logger_handlers():
root_logger = logging.getLogger()
for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
for handler in handlers_before_set_up:
root_logger.addHandler(handler)
bo_logger = logging.getLogger('zc.buildout')
for handler in bo_logger.handlers[:]:
bo_logger.removeHandler(handler)
register_teardown(restore_root_logger_handlers)
base = tempfile.mkdtemp('buildoutSetUp')
base = os.path.realpath(base)
register_teardown(lambda base=base: rmtree(base))
old_home = os.environ.get('HOME')
os.environ['HOME'] = os.path.join(base, 'bbbBadHome')
def restore_home():
if old_home is None:
del os.environ['HOME']
else:
os.environ['HOME'] = old_home
register_teardown(restore_home)
base = os.path.join(base, '_TEST_')
os.mkdir(base)
tmp = tempfile.mkdtemp('buildouttests')
register_teardown(lambda: rmtree(tmp))
zc.buildout.easy_install.default_index_url = 'file://'+tmp
os.environ['buildout_testing_index_url'] = (
zc.buildout.easy_install.default_index_url)
def tmpdir(name):
path = os.path.join(base, name)
mkdir(path)
return path
sample = tmpdir('sample-buildout')
os.chdir(sample)
# Create a basic buildout.cfg to avoid a warning from buildout:
with open('buildout.cfg', 'w') as f:
f.write("[buildout]\nparts =\n")
# Use the buildout bootstrap command to create a buildout
zc.buildout.buildout.Buildout(
'buildout.cfg',
[('buildout', 'log-level', 'WARNING'),
# trick bootstrap into putting the buildout develop egg
# in the eggs dir.
('buildout', 'develop-eggs-directory', 'eggs'),
]
).bootstrap([])
# Create the develop-eggs dir, which didn't get created the usual
# way due to the trick above:
os.mkdir('develop-eggs')
path_to_coveragerc = os.getenv("COVERAGE_PROCESS_START", None)
if path_to_coveragerc is not None:
# Before we return to the current directory and destroy the
# temporary working directory, we need to copy all the coverage files
# back so that they can be `coverage combine`d.
def copy_coverage_files():
coveragedir = os.path.dirname(path_to_coveragerc)
import glob
for f in glob.glob('.coverage*'):
shutil.copy(f, coveragedir)
__tear_downs.insert(0, copy_coverage_files)
# Now we must modify the newly created bin/buildout to
# actually begin coverage.
with open('bin/buildout') as f:
import textwrap
lines = f.read().splitlines()
assert lines[1] == '', lines
lines[1] = 'import coverage; coverage.process_startup()'
with open('bin/buildout', 'w') as f:
f.write('\n'.join(lines))
def start_server(path):
port, thread = _start_server(path, name=path)
url = 'http://localhost:%s/' % port
register_teardown(lambda: stop_server(url, thread))
return url
cdpaths = []
def cd(*path):
path = os.path.join(*path)
cdpaths.append(os.path.abspath(os.getcwd()))
os.chdir(path)
def uncd():
os.chdir(cdpaths.pop())
test.globs.update(dict(
sample_buildout = sample,
ls = ls,
cat = cat,
mkdir = mkdir,
rmdir = rmdir,
remove = remove,
tmpdir = tmpdir,
write = write,
system = system,
get = get,
cd = cd, uncd = uncd,
join = os.path.join,
sdist = sdist,
bdist_egg = bdist_egg,
start_server = start_server,
stop_server = stop_server,
buildout = os.path.join(sample, 'bin', 'buildout'),
wait_until = wait_until,
print_ = print_,
clean_up_pyc = clean_up_pyc,
))
zc.buildout.easy_install.prefer_final(prefer_final)
def buildoutTearDown(test):
for f in test.globs['__tear_downs']:
f()
class Server(HTTPServer):
def __init__(self, tree, *args):
HTTPServer.__init__(self, *args)
self.tree = os.path.abspath(tree)
__run = True
def serve_forever(self):
while self.__run:
self.handle_request()
def handle_error(self, *_):
self.__run = False
class Handler(BaseHTTPRequestHandler):
Server.__log = False
def __init__(self, request, address, server):
self.__server = server
self.tree = server.tree
BaseHTTPRequestHandler.__init__(self, request, address, server)
def do_GET(self):
if '__stop__' in self.path:
self.__server.server_close()
raise SystemExit
def k():
self.send_response(200)
out = '<html><body>k</body></html>\n'.encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
if self.path == '/enable_server_logging':
self.__server.__log = True
return k()
if self.path == '/disable_server_logging':
self.__server.__log = False
return k()
path = os.path.abspath(os.path.join(self.tree, *self.path.split('/')))
if not (
((path == self.tree) or path.startswith(self.tree+os.path.sep))
and
os.path.exists(path)
):
self.send_response(404, 'Not Found')
#self.send_response(200)
out = '<html><body>Not Found</body></html>'.encode()
#out = '\n'.join(self.tree, self.path, path)
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
return
self.send_response(200)
if os.path.isdir(path):
out = ['<html><body>\n']
names = sorted(os.listdir(path))
for name in names:
if os.path.isdir(os.path.join(path, name)):
name += '/'
out.append('<a href="%s">%s</a><br>\n' % (name, name))
out.append('</body></html>\n')
out = ''.join(out).encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
else:
with open(path, 'rb') as f:
out = f.read()
            self.send_header('Content-Length', str(len(out)))
if path.endswith('.egg'):
self.send_header('Content-Type', 'application/zip')
elif path.endswith('.gz'):
self.send_header('Content-Type', 'application/x-gzip')
elif path.endswith('.zip'):
                self.send_header('Content-Type', 'application/zip')
else:
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
def log_request(self, code):
if self.__server.__log:
print_('%s %s %s' % (self.command, code, self.path))
def _run(tree, port):
server_address = ('localhost', port)
httpd = Server(tree, server_address, Handler)
httpd.serve_forever()
httpd.server_close()
def get_port():
for i in range(10):
port = random.randrange(20000, 30000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
try:
s.connect(('localhost', port))
except socket.error:
return port
finally:
s.close()
raise RuntimeError("Can't find port")
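# A minimal alternative sketch (not used by the tests above): bind to port 0
# and let the OS assign a free port, avoiding get_port's probe-then-use race
# at the cost of briefly holding the socket open.
def _get_os_assigned_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('localhost', 0))   # port 0 means "any free port"
        return s.getsockname()[1]  # the port the OS actually picked
    finally:
        s.close()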
def _start_server(tree, name=''):
port = get_port()
thread = threading.Thread(target=_run, args=(tree, port), name=name)
    thread.daemon = True
thread.start()
wait(port, up=True)
return port, thread
def start_server(tree):
return _start_server(tree)[0]
def stop_server(url, thread=None):
try:
urlopen(url+'__stop__')
except Exception:
pass
if thread is not None:
thread.join() # wait for thread to stop
def wait(port, up):
addr = 'localhost', port
for i in range(120):
time.sleep(0.25)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
s.close()
if up:
break
except socket.error:
e = sys.exc_info()[1]
            if e.args[0] not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
s.close()
if not up:
break
else:
if up:
raise
else:
raise SystemError("Couldn't stop server")
def install(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
if dist.location.endswith('.egg'):
destination = os.path.join(destination,
os.path.basename(dist.location),
)
if os.path.isdir(dist.location):
shutil.copytree(dist.location, destination)
else:
shutil.copyfile(dist.location, destination)
else:
# copy link
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def install_develop(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'develop-eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
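# Example (illustrative): install_develop('demo', test) writes
# develop-eggs/demo.egg-link into the sample buildout, containing the absolute
# location of the 'demo' distribution, mimicking what `setup.py develop`
# would leave behind.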
def _normalize_path(match):
path = match.group(1)
if os.path.sep == '\\':
path = path.replace('\\\\', '/')
if path.startswith('\\'):
path = path[1:]
return '/' + path.replace(os.path.sep, '/')
normalize_path = (
re.compile(
r'''[^'" \t\n\r]+\%(sep)s_[Tt][Ee][Ss][Tt]_\%(sep)s([^"' \t\n\r]+)'''
% dict(sep=os.path.sep)),
_normalize_path,
)
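# Example (illustrative): with the pattern above, a doctest-captured path such
# as /tmp/tmpXYZ/_TEST_/sample-buildout/buildout.cfg is rewritten to
# /sample-buildout/buildout.cfg, making expected output platform independent.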
normalize_endings = re.compile('\r\n'), '\n'
normalize_script = (
re.compile('(\n?)- ([a-zA-Z_.-]+)-script.py\n- \\2.exe\n'),
'\\1- \\2\n')
if sys.version_info > (3, ):
normalize___pycache__ = (
re.compile('(\n?)d __pycache__\n'), '\\1')
else:
normalize___pycache__ = (
re.compile(r'(\n?)- \S+\.pyc\n'), '\\1')
normalize_egg_py = (
re.compile(r'-py\d[.]\d+(-\S+)?\.egg'),
'-pyN.N.egg',
)
normalize_exception_type_for_python_2_and_3 = (
re.compile(r'^(\w+\.)*([A-Z][A-Za-z0-9]+Error: )'),
    '\\2')
normalize_open_in_generated_script = (
re.compile(r"open\(__file__, 'U'\)"), 'open(__file__)')
not_found = (re.compile(r'Not found: [^\n]+/(\w|\.)+/\r?\n'), '')
python27_warning = (re.compile(r'DEPRECATION: Python 2.7 reached the end of its '
'life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no '
'longer maintained. A future version of pip will drop support for Python '
'2.7. More details about Python 2 support in pip, can be found at '
'https://pip.pypa.io/en/latest/development/release-process/#python-2-support\n'),
'')
python27_warning_2 = (re.compile(r'DEPRECATION: Python 2.7 reached the end of its '
'life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no '
'longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. '
'More details about Python 2 support in pip, can be found at '
'https://pip.pypa.io/en/latest/development/release-process/#python-2-support\n'),
'')
easyinstall_deprecated = (re.compile(r'.*EasyInstallDeprecationWarning.*\n'),'')
setuptools_deprecated = (re.compile(r'.*SetuptoolsDeprecationWarning.*\n'),'')
pkg_resources_deprecated = (re.compile(r'.*PkgResourcesDeprecationWarning.*\n'),'')
warnings_warn = (re.compile(r'.*warnings\.warn.*\n'),'')
# Setuptools now pulls in dependencies when installed.
adding_find_link = (re.compile(r"Adding find link '[^']+'"
r" from setuptools .*\r?\n"), '')
ignore_not_upgrading = (
re.compile(
'Not upgrading because not running a local buildout command.\n'
), '')
def run_buildout(command):
# Make sure we don't get .buildout
os.environ['HOME'] = os.path.join(os.getcwd(), 'home')
args = command.split()
buildout = pkg_resources.load_entry_point(
'zc.buildout', 'console_scripts', args[0])
buildout(args[1:])
def run_from_process(target, *args, **kw):
sys.stdout = sys.stderr = open('out', 'w')
target(*args, **kw)
def run_in_process(*args, **kwargs):
try:
ctx = multiprocessing.get_context('fork')
process = ctx.Process(target=run_from_process, args=args, kwargs=kwargs)
except AttributeError:
process = multiprocessing.Process(target=run_from_process, args=args, kwargs=kwargs)
process.daemon = True
process.start()
process.join(99)
if process.is_alive() or process.exitcode:
with open('out') as f:
print(f.read())
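# The child's stdout/stderr are redirected to the 'out' file by
# run_from_process; run_in_process echoes that file only when the child is
# still alive after the 99-second join or exited with a non-zero code.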
def run_buildout_in_process(command='buildout'):
command = command.split(' ', 1)
command.insert(
1,
" use-dependency-links=false"
# Leaving this here so we can uncomment to see what's going on.
#" log-format=%(asctime)s____%(levelname)s_%(message)s -vvv"
" index=" + __file__ + 'nonexistent' # hide index
)
command = ' '.join(command)
run_in_process(run_buildout, command)
def setup_coverage(path_to_coveragerc):
if 'RUN_COVERAGE' not in os.environ:
return
if not os.path.exists(path_to_coveragerc):
raise ValueError('coveragerc file %s does not exist.' % path_to_coveragerc)
os.environ['COVERAGE_PROCESS_START'] = path_to_coveragerc
rootdir = os.path.dirname(path_to_coveragerc)
def combine_report():
subprocess.call(
[
sys.executable, '-m', 'coverage', 'combine',
],
cwd=rootdir,
)
subprocess.call(
[
sys.executable, '-m', 'coverage', 'report',
],
cwd=rootdir,
)
if path_to_coveragerc:
try:
import coverage
print("Coverage configured with %s" % path_to_coveragerc)
if 'COVERAGE_REPORT' in os.environ:
import atexit
atexit.register(combine_report)
coverage.process_startup()
except ImportError:
print(
"You try to run coverage "
"but coverage is not installed in your environment."
)
sys.exit(1)
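# Typical usage (illustrative): a test runner calls
#   setup_coverage(os.path.join(project_root, '.coveragerc'))
# before spawning buildouts; with RUN_COVERAGE set, each subprocess starts
# coverage via COVERAGE_PROCESS_START, and the atexit hook combines the
# per-process data files into a single report when COVERAGE_REPORT is set.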
|
main.py
|
from byj.stepper import *
from picamcd.colorcam import *
import sys
import time
from servo.servo import *
from motor.motor import *
from stepper.stepper import *
import pygame
from pygame.locals import *
from config import *
from beadsort.hexcolor import *
from beadsort.bead import *
from beadsort.colorbin import *
import logging
from threading import Thread
import math
logging.basicConfig(filename="./example.log", level=logging.DEBUG)
logging.info("test")
pygame.init()
logging.info("testb")
smallfont = pygame.font.SysFont('Arial',35)
servo = Servo(23,position_dict["home"])
motor = Motor(24)  # the motor must start in the off state
color_000 = Color("black")
stepper = Stepper(stepper_pin_list)
DISPLAY= pygame.display.set_mode((screen_w, screen_h))
#LOG CONFIG
cam = ColorCam()
cur_color = cam.get_color(bead_loc_x,bead_loc_y,bead_loc_dimension)
#color wheel
color_wheel_list = generate_circle_color(1)
bin_list = []
def get_bead():
#check if bead exists
servo.set_pos_delay(position_dict["home"],servo_delay,True)
default_color = cam.get_default_color()
repeat_cnt = 1
init_motor_delay = 0.01
cur_diff = get_difference(cur_color,default_color)
global sort_mode
while cur_diff < def_threshold and sort_mode:
for i in range(repeat_cnt):
motor.run(0.01 + i*0.01)
cur_diff = get_difference(cur_color,default_color)
if cur_diff > def_threshold:
break
time.sleep(0.75)
repeat_cnt+=1
if repeat_cnt > 20:
print("giving up")
return Bead(Color("black"))
#return bead
ret_bead = Bead(cur_color)
print("creating bead with color ",cur_color,"diff",cur_diff)
return ret_bead
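# get_bead escalates the motor wiggle (repeat_cnt bursts of increasing
# duration) until the sampled colour differs from the empty-chamber default
# by more than def_threshold, and returns a black bead after 20 rounds so the
# sorting thread cannot stall on an empty hopper.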
def get_tgt_bin(tgt_bead,tgt_bin_list):
tgt_diff = 1000
ret_idx = 0
for bin_idx in range(1,len(bin_list)):
cur_bin = bin_list[bin_idx]
cur_diff = cur_bin.compare_color(tgt_bead.get_color())
# if cur_bin.check_color_match(tgt_bead.get_color()) and cur_diff < tgt_diff:
if cur_diff < tgt_diff and cur_diff < color_threshold:
tgt_diff = cur_diff
ret_idx = bin_idx
return ret_idx
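# get_tgt_bin returns 0 (the filter bin at index 0) when no existing bin is
# within color_threshold of the bead, which is the signal sort_bead uses to
# open a new bin while capacity remains.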
def sort_bead(tgt_bin_list):
cur_bead = get_bead()
bin_idx = get_tgt_bin(cur_bead,bin_list)
if bin_idx == 0 and len(bin_list)<max_bin:
new_bin = ColorBin(cur_bead.get_color(),color_threshold,bead_x_cnt)
# add bin
bin_list.append(new_bin)
bin_idx = len(bin_list)-1
bin_list[bin_idx].add_bead(cur_bead)
# if bin_idx!=2:
stepper.move(bin_idx*stepper_interval,True) # move slide to bin
motor.run_delay(wiggle_delay,motor_delay)
# print("filtering bead",bin_idx)
servo.set_pos_delay(position_dict["filter"],servo_delay,True)
time.sleep(0.1)
servo.set_pos(position_dict["filter"]+5.0)
motor.run_delay(wiggle_delay,motor_delay)
servo.set_pos_delay(position_dict["home"],servo_delay,True)
time.sleep(0.25)
servo.set_pos(position_dict["home"])
time.sleep(0.4)
motor.run_delay(wiggle_delay*0.5,motor_delay)
stepper.move(bin_idx*stepper_interval,False) # move slide to bin
def refresh_vals():
tmp_color = None
while tmp_color is None:
try:
tmp_color = cam.get_refresh_vals(bead_loc_x,bead_loc_y,bead_loc_dimension)
        except Exception:
tmp_color = None
return tmp_color
def draw_cam_color():
init_y = color_y
def_color = cam.get_default_color()
global cur_color
def_rect = pygame.Rect(color_x+120,init_y,50,70)
def_surf = pygame.Surface((50, 70))
def_surf.fill(hex_tuple(def_color))
DISPLAY.blit(def_surf,def_rect)
def_text = smallfont.render("default",True,hex_tuple(color_000))
DISPLAY.blit(def_text,(color_x,init_y))
def_rgb = smallfont.render(str(def_color),True,hex_tuple(color_000))
DISPLAY.blit(def_rgb,(color_x+180,init_y))
init_y+=80
draw_color = cur_color
cam_rect = pygame.Rect(color_x+120,init_y,50,70)
cam_surf = pygame.Surface((50,70))
cam_surf.fill(hex_tuple(draw_color))
DISPLAY.blit(cam_surf,cam_rect)
cam_text = smallfont.render("CAM",True,hex_tuple(color_000))
DISPLAY.blit(cam_text,(color_x,init_y))
cam_rgb = smallfont.render(str(draw_color),True,hex_tuple(color_000))
DISPLAY.blit(cam_rgb,(color_x+180,init_y))
    cam_diff_val = round(get_difference(def_color, cur_color), 5)
    cam_diff_str = str(cam_diff_val) + " / " + str(def_threshold)
cam_diff = smallfont.render(cam_diff_str,True,hex_tuple(color_000))
DISPLAY.blit(cam_diff,(color_x+350,init_y))
#draw color wheel match
cur_w = 0
cur_radius = 10
angle_interval = math.radians(360/len(color_wheel_list))
wheel_radius = 150
cur_closest_idx = 0
cur_diff = 1000.0
closest_idx = get_closest_wheel_color(draw_color,color_wheel_list)
for idx in range(len(color_wheel_list)):
tmp_color = hex_tuple(color_wheel_list[idx])
cur_angle = angle_interval * idx
cur_x = int(wheel_radius * math.cos(cur_angle) + color_wheel_x)
cur_y = int(wheel_radius * math.sin(cur_angle) + color_wheel_y)
tmp_radius = cur_radius
if idx == closest_idx:
tmp_radius = 20
pygame.draw.circle(DISPLAY,tmp_color,(cur_x,cur_y),tmp_radius)
# blit idx val
hsl_diff_str = get_hsl_difference(draw_color,color_wheel_list[closest_idx])
hsl_diff_str = "HSL: " + str(round(hsl_diff_str,5))
rgb_diff_str = get_difference(draw_color,color_wheel_list[closest_idx])
rgb_diff_str = "RGB: " + str(round(rgb_diff_str,5)) + "/" + str(round(max_threshold,5))
hsl_text = smallfont.render(hsl_diff_str,True,hex_tuple(color_000))
DISPLAY.blit(hsl_text,(color_wheel_x-80,color_wheel_y-20))
rgb_text = smallfont.render(rgb_diff_str,True,hex_tuple(color_000))
DISPLAY.blit(rgb_text,(color_wheel_x-80,color_wheel_y+20))
pygame.display.update()
return draw_color
def refresh_servo():
servo_color = Color("gray")
    if servo.check_moving() == 1:
servo_color = Color("green")
servo_pos = servo.get_pos()
text = smallfont.render(str(servo_pos), True ,hex_tuple(color_000))
DISPLAY.blit(text,(servo_win_x,servo_win_y))
servo_status_rect = pygame.Rect(servo_win_x+120,servo_win_y,50,30)
servo_status_surf = pygame.Surface((50, 30))
servo_status_surf.fill(hex_tuple(servo_color))
DISPLAY.blit(servo_status_surf,servo_status_rect)
textb = smallfont.render(servo.get_action(),True,hex_tuple(color_000))
DISPLAY.blit(textb,(servo_win_x+200,servo_win_y))
def refresh_bins(tgt_bin_list):
cur_bin_y = init_bin_y-60
cur_x = 110
for idx in range(1,len(bin_list)):
bin_list[idx].move_to(cur_x,cur_bin_y)
cur_bin_y += 52
bin_list[idx].draw_bin(DISPLAY)
if idx == int(max_bin/2)+1:
cur_x += int(screen_w/2)
cur_bin_y = init_bin_y-60
# draw last bin last
bin_list[0].move_to(cur_x,cur_bin_y)
bin_list[0].draw_bin(DISPLAY)
def init_cam():
# default_color = cam.get_color(bead_loc_x,bead_loc_y)
default_color = Color(def_color) #hardcode
cam.set_default_color(default_color)
cam.set_default_viewport(bead_loc_x,bead_loc_y,bead_loc_dimension)
cam.set_overlay_img(bead_loc_x,bead_loc_y,bead_loc_dimension,default_color)
# cam.save_img_locally()
cam.add_overlay()
cur_color = cam.get_color(bead_loc_x,bead_loc_y,bead_loc_dimension)
def main():
# generate circle
#init code
print(position_dict)
filter_bin = ColorBin(Color("black"),-1,bead_x_cnt)
filter_bin.move_to(cam_w+10,10)
bin_list.append(filter_bin)
# default color_assume empty chamber
servo.set_pos(position_dict["home"])
init_cam()
global sort_mode
sort_mode = False
motion_thread = Thread()
while True:
global cur_color
cur_color = cam.get_color(bead_loc_x,bead_loc_y,bead_loc_dimension)
# print("main loop",cur_color)
for event in pygame.event.get():
if event.type==QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_LEFT:
servo.set_pos(servo.get_pos()-servo_interval)
if event.key == pygame.K_RIGHT:
servo.set_pos(servo.get_pos()+servo_interval)
if event.key == pygame.K_q: # dispense to graveyard
logging.info("graveyard")
servo.set_pos_delay(position_dict["graveyard"],servo_delay,True)
if event.key == pygame.K_e: # dispense to filter
logging.info("filter")
servo.set_pos_delay(position_dict["filter"],servo_delay,True)
if event.key == pygame.K_w: # to home
logging.info("home")
servo.set_pos_delay(position_dict["home"],servo_delay,True)
time.sleep(1)
if event.key == pygame.K_t:
# cur_color = cam.get_refresh_vals(bead_loc_x,bead_loc_y,bead_loc_dimension)[0]
cur_color = refresh_vals()[0]
print("setting default",cur_color)
cam.set_default_color(cur_color)
cam.set_default_viewport(bead_loc_x,bead_loc_y,bead_loc_dimension)
cam.remove_overlay()
cam.set_overlay_img(bead_loc_x,bead_loc_y,bead_loc_dimension,cur_color)
cam.add_overlay()
# stepper 0
stepper.set_position(0)
if event.key == pygame.K_s:
sort_mode = not sort_mode
print("sorting mode ",sort_mode)
if event.key == pygame.K_a:
logging.info("run_motor")
motor.run_delay(wiggle_delay,motor_delay)
if event.key == pygame.K_g:
logging.info("run sequence")
if event.key == pygame.K_h:
for i in range(20):
motor.run_delay(motor_delay + i*0.02,0.2)
if event.key == pygame.K_n:
print("stepper left")
stepper.move(stepper_interval,True)
if event.key == pygame.K_m:
print("stepper right")
stepper.move(stepper_interval,False)
if sort_mode:
            if not motion_thread.is_alive():
print("sorting bead")
motion_thread = Thread(target=sort_bead,args=(bin_list,))
motion_thread.start()
DISPLAY.fill(hex_tuple(Color("white"))) #clear screen
refresh_bins(bin_list)
refresh_servo()
draw_cam_color()
pygame.display.update()
if __name__ == "__main__":
main()
|
bridge.py
|
#!/usr/bin/env python3
import argparse
import carla # pylint: disable=import-error
import math
import numpy as np
import time
import threading
from cereal import log
from multiprocessing import Process, Queue
from typing import Any
import cereal.messaging as messaging
from common.params import Params
from common.realtime import Ratekeeper, DT_DMON
from lib.can import can_function
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--low_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point',
type=int, default=16)
args = parser.parse_args()
W, H = 1164, 874
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.
pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl','controlsState'])
class VehicleState:
def __init__(self):
self.speed = 0
self.angle = 0
self.bearing_deg = 0.0
self.vel = carla.Vector3D()
self.cruise_button= 0
self.is_engaged=False
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
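# Illustrative check (a sketch, not part of the original bridge and never
# called by it): a requested jump from 0 to 2 degrees is reached in four
# 0.5-degree steps under the limiter above.
def _demo_steer_rate_limit():
  angle, steps = 0.0, []
  for _ in range(4):
    angle = steer_rate_limit(angle, 2.0)
    steps.append(angle)
  assert steps == [0.5, 1.0, 1.5, 2.0]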
frame_id = 0
def cam_callback(image):
global frame_id
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
dat = messaging.new_message('roadCameraState')
dat.roadCameraState = {
"frameId": image.frame,
"image": img.tobytes(),
"transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
}
pm.send('roadCameraState', dat)
frame_id += 1
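# CARLA delivers the frame as BGRA bytes; the [0, 1, 2] channel slice above
# drops the alpha plane, leaving an (H, W, 3) array that is serialized into
# the roadCameraState message.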
def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def panda_state_function(exit_event: threading.Event):
pm = messaging.PubMaster(['pandaState'])
while not exit_event.is_set():
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaState', dat)
time.sleep(0.5)
def gps_callback(gps, vehicle_state):
dat = messaging.new_message('gpsLocationExternal')
# transform vel from carla to NED
# north is -Y in CARLA
velNED = [
-vehicle_state.vel.y, # north/south component of NED is negative when moving south
vehicle_state.vel.x, # positive when moving east, which is x in carla
vehicle_state.vel.z,
]
dat.gpsLocationExternal = {
"timestamp": int(time.time() * 1000),
"flags": 1, # valid fix
"accuracy": 1.0,
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"bearingAccuracyDeg": 0.1,
"vNED": velNED,
"bearingDeg": vehicle_state.bearing_deg,
"latitude": gps.latitude,
"longitude": gps.longitude,
"altitude": gps.altitude,
"speed": vehicle_state.speed,
"source": log.GpsLocationData.SensorSource.ublox,
}
pm.send('gpsLocationExternal', dat)
def fake_driver_monitoring(exit_event: threading.Event):
pm = messaging.PubMaster(['driverState','driverMonitoringState'])
while not exit_event.is_set():
# dmonitoringmodeld output
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState')
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
pm.send('driverMonitoringState', dat)
time.sleep(DT_DMON)
def can_function_runner(vs: VehicleState, exit_event: threading.Event):
i = 0
while not exit_event.is_set():
can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)
time.sleep(0.01)
i+=1
def bridge(q):
# setup CARLA
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(10.0)
world = client.load_world(args.town)
if args.low_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Particles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > args.num_selected_spawn_point, \
f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[args.num_selected_spawn_point]
vehicle = world.spawn_actor(vehicle_bp, spawn_point)
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
# launch fake car threads
threads = []
exit_event = threading.Event()
threads.append(threading.Thread(target=panda_state_function, args=(exit_event,)))
threads.append(threading.Thread(target=fake_driver_monitoring, args=(exit_event,)))
threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, exit_event,)))
for t in threads:
t.start()
# can loop
rk = Ratekeeper(100, print_delay_threshold=0.05)
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0
old_steer = old_brake = old_throttle = 0
throttle_manual_multiplier = 0.7 #keyboard signal is always 1
brake_manual_multiplier = 0.7 #keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO #keyboard signal is always 1
while 1:
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
# --------------Step 1-------------------------------
if not q.empty():
message = q.get()
m = message.split('_')
if m[0] == "steer":
steer_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "throttle":
throttle_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "brake":
brake_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "reverse":
#in_reverse = not in_reverse
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
elif m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
elif m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "quit":
break
throttle_out = throttle_manual * throttle_manual_multiplier
steer_out = steer_manual * steer_manual_multiplier
brake_out = brake_manual * brake_manual_multiplier
#steer_out = steer_out
# steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
old_throttle = throttle_out
old_brake = brake_out
# print('message',old_throttle, old_steer, old_brake)
if is_openpilot_engaged:
sm.update(0)
throttle_op = sm['carControl'].actuators.gas #[0,1]
brake_op = sm['carControl'].actuators.brake #[0,1]
steer_op = sm['controlsState'].steeringAngleDesiredDeg # degrees [-180,180]
throttle_out = throttle_op
steer_out = steer_op
brake_out = brake_op
steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
else:
if throttle_out==0 and old_throttle>0:
if throttle_ease_out_counter>0:
throttle_out = old_throttle
throttle_ease_out_counter += -1
else:
throttle_ease_out_counter = REPEAT_COUNTER
old_throttle = 0
if brake_out==0 and old_brake>0:
if brake_ease_out_counter>0:
brake_out = old_brake
brake_ease_out_counter += -1
else:
brake_ease_out_counter = REPEAT_COUNTER
old_brake = 0
if steer_out==0 and old_steer!=0:
if steer_ease_out_counter>0:
steer_out = old_steer
steer_ease_out_counter += -1
else:
steer_ease_out_counter = REPEAT_COUNTER
old_steer = 0
# --------------Step 2-------------------------------
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1,1)
steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)
vc.throttle = throttle_out/0.6
vc.steer = steer_carla
vc.brake = brake_out
vehicle.apply_control(vc)
# --------------Step 3-------------------------------
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) # in m/s
vehicle_state.speed = speed
vehicle_state.vel = vel
vehicle_state.angle = steer_out
vehicle_state.cruise_button = cruise_button
vehicle_state.is_engaged = is_openpilot_engaged
if rk.frame%PRINT_DECIMATION == 0:
print("frame: ", "engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "; steer(c/deg): ", round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3))
rk.keep_time()
# Clean up resources in the opposite order they were created.
exit_event.set()
for t in reversed(threads):
t.join()
gps.destroy()
imu.destroy()
camera.destroy()
vehicle.destroy()
def bridge_keep_alive(q: Any):
while 1:
try:
bridge(q)
break
except RuntimeError:
print("Restarting bridge...")
if __name__ == "__main__":
# make sure params are in a good state
set_params_enabled()
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = 20
msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
Params().put("CalibrationParams", msg.to_bytes())
q: Any = Queue()
p = Process(target=bridge_keep_alive, args=(q,), daemon=True)
p.start()
if args.joystick:
# start input poll for joystick
from lib.manual_ctrl import wheel_poll_thread
wheel_poll_thread(q)
p.join()
else:
# start input poll for keyboard
from lib.keyboard_ctrl import keyboard_poll_thread
keyboard_poll_thread(q)
|
ViewWinRenderedGrid.py
|
'''
Created on Oct 5, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
Note: rendering with tkinter appears to be slow (see the profiling hook in view() below).
A typical example: an instance takes 3 secs in view() for an open table, and after the
Python operations have concluded, tkinter takes an additional 27 secs to render.
'''
import os, threading, time
from tkinter import Menu, BooleanVar, font as tkFont
from arelle import (ViewWinGrid, ModelDocument, ModelDtsObject, ModelInstanceObject, XbrlConst,
ModelXbrl, XmlValidate, XmlUtil, Locale, FunctionXfi)
from arelle.ModelValue import qname, QName
from arelle.RenderingResolver import resolveAxesStructure, RENDER_UNITS_PER_CHAR
from arelle.ModelFormulaObject import Aspect, aspectModels, aspectRuleAspects, aspectModelAspect
from arelle.ModelInstanceObject import ModelDimensionValue
from arelle.ModelRenderingObject import (ModelClosedDefinitionNode, ModelEuAxisCoord, ModelTable,
ModelFilterDefinitionNode,
OPEN_ASPECT_ENTRY_SURROGATE)
from arelle.FormulaEvaluator import aspectMatches
from arelle.PrototypeInstanceObject import FactPrototype
from arelle.UiUtil import (gridBorder, gridSpacer, gridHdr, gridCell, gridCombobox,
label,
TOPBORDER, LEFTBORDER, RIGHTBORDER, BOTTOMBORDER, CENTERCELL)
from arelle.DialogNewFactItem import getNewFactItemOptions
from collections import defaultdict
emptyList = []
ENTRY_WIDTH_IN_CHARS = 12 # width of a data column entry cell in characters (nominal)
PADDING = 20 # screen units of padding between entry cells
def viewRenderedGrid(modelXbrl, tabWin, lang=None):
modelXbrl.modelManager.showStatus(_("viewing rendering"))
view = ViewRenderedGrid(modelXbrl, tabWin, lang)
view.blockMenuEvents = 1
menu = view.contextMenu()
optionsMenu = Menu(view.viewFrame, tearoff=0)
optionsMenu.add_command(label=_("New fact item options"), underline=0, command=lambda: getNewFactItemOptions(modelXbrl.modelManager.cntlr, view.newFactItemOptions))
optionsMenu.add_command(label=_("Open breakdown entry rows"), underline=0, command=view.setOpenBreakdownEntryRows)
view.ignoreDimValidity.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("Ignore Dimensional Validity"), underline=0, variable=view.ignoreDimValidity, onvalue=True, offvalue=False)
view.xAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("X-Axis Children First"), underline=0, variable=view.xAxisChildrenFirst, onvalue=True, offvalue=False)
view.yAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
optionsMenu.add_checkbutton(label=_("Y-Axis Children First"), underline=0, variable=view.yAxisChildrenFirst, onvalue=True, offvalue=False)
menu.add_cascade(label=_("Options"), menu=optionsMenu, underline=0)
view.tablesMenu = Menu(view.viewFrame, tearoff=0)
menu.add_cascade(label=_("Tables"), menu=view.tablesMenu, underline=0)
view.tablesMenuLength = 0
view.menuAddLangs()
saveMenu = Menu(view.viewFrame, tearoff=0)
saveMenu.add_command(label=_("HTML file"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="html"))
saveMenu.add_command(label=_("Layout model"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="xml"))
saveMenu.add_command(label=_("XBRL instance"), underline=0, command=view.saveInstance)
menu.add_cascade(label=_("Save"), menu=saveMenu, underline=0)
view.view()
view.blockSelectEvent = 1
view.blockViewModelObject = 0
view.viewFrame.bind("<Enter>", view.cellEnter, '+')
view.viewFrame.bind("<Leave>", view.cellLeave, '+')
view.viewFrame.bind("<FocusOut>", view.onQuitView, '+')
view.viewFrame.bind("<1>", view.onClick, '+')
view.viewFrame.bind("<Configure>", view.onConfigure, '+') # frame resized, redo column header wrap length ratios
view.blockMenuEvents = 0
class ViewRenderedGrid(ViewWinGrid.ViewGrid):
def __init__(self, modelXbrl, tabWin, lang):
super(ViewRenderedGrid, self).__init__(modelXbrl, tabWin, "Table", True, lang)
self.newFactItemOptions = ModelInstanceObject.NewFactItemOptions(xbrlInstance=modelXbrl)
self.factPrototypes = []
self.aspectEntryObjectIdsNode = {}
self.aspectEntryObjectIdsCell = {}
self.factPrototypeAspectEntryObjectIds = defaultdict(set)
self.zOrdinateChoices = None
# context menu Boolean vars
self.options = self.modelXbrl.modelManager.cntlr.config.setdefault("viewRenderedGridOptions", {})
self.openBreakdownLines = self.options.setdefault("openBreakdownLines", 5) # ensure there is a default entry
self.ignoreDimValidity = BooleanVar(value=self.options.setdefault("ignoreDimValidity",True))
self.xAxisChildrenFirst = BooleanVar(value=self.options.setdefault("xAxisChildrenFirst",True))
self.yAxisChildrenFirst = BooleanVar(value=self.options.setdefault("yAxisChildrenFirst",False))
def close(self):
super(ViewRenderedGrid, self).close()
if self.modelXbrl:
for fp in self.factPrototypes:
fp.clear()
self.factPrototypes = None
self.aspectEntryObjectIdsNode.clear()
self.aspectEntryObjectIdsCell.clear()
def loadTablesMenu(self):
tblMenuEntries = {}
tblRelSet = self.modelXbrl.relationshipSet("Table-rendering")
self.tablesToELR = {}
for tblLinkroleUri in tblRelSet.linkRoleUris:
for tableAxisArcrole in (XbrlConst.euTableAxis, XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011):
tblAxisRelSet = self.modelXbrl.relationshipSet(tableAxisArcrole, tblLinkroleUri)
if tblAxisRelSet and len(tblAxisRelSet.modelRelationships) > 0:
# table name
modelRoleTypes = self.modelXbrl.roleTypes.get(tblLinkroleUri)
if modelRoleTypes is not None and len(modelRoleTypes) > 0:
roledefinition = modelRoleTypes[0].definition
if roledefinition is None or roledefinition == "":
roledefinition = os.path.basename(tblLinkroleUri)
for table in tblAxisRelSet.rootConcepts:
# add table to menu if there's any entry
tblMenuEntries[roledefinition] = tblLinkroleUri
self.tablesToELR[table.objectId()] = tblLinkroleUri
break
self.tablesMenu.delete(0, self.tablesMenuLength)
self.tablesMenuLength = 0
self.tblELR = None
for tblMenuEntry in sorted(tblMenuEntries.items()):
tbl,elr = tblMenuEntry
self.tablesMenu.add_command(label=tbl, command=lambda e=elr: self.view(viewTblELR=e)) # use this to activate profiling from menu selection: , profile=True))
self.tablesMenuLength += 1
if self.tblELR is None:
self.tblELR = elr # start viewing first ELR
def viewReloadDueToMenuAction(self, *args):
if not self.blockMenuEvents:
# update config (config saved when exiting)
self.options["ignoreDimValidity"] = self.ignoreDimValidity.get()
self.options["xAxisChildrenFirst"] = self.xAxisChildrenFirst.get()
self.options["yAxisChildrenFirst"] = self.yAxisChildrenFirst.get()
self.view()
def setOpenBreakdownEntryRows(self, *args):
import tkinter.simpledialog
newValue = tkinter.simpledialog.askinteger(_("arelle - Open breakdown entry rows setting"),
_("The number of extra entry rows for open breakdowns is: {0} \n\n"
"(When a row header includes an open breakdown, such as \nfor typed dimension(s), this number of extra entry rows \nare provided below the table.)"
).format(self.options["openBreakdownLines"]),
parent=self.tabWin)
if newValue is not None:
self.options["openBreakdownLines"] = self.openBreakdownLines = newValue
self.viewReloadDueToMenuAction()
def view(self, viewTblELR=None, newInstance=None, profile=False):
'''
if profile: # for debugging only, to use, uncomment in loadTablesMenu
import cProfile, pstats, sys
statsFile = "/Users/hermf/temp/profileRendering.bin"
cProfile.runctx("self.view(viewTblELR=viewTblELR)", globals(), locals(), statsFile)
priorStdOut = sys.stdout
sys.stdout = open("/Users/hermf/temp/profileRendering.txt", "w")
statObj = pstats.Stats(statsFile)
statObj.strip_dirs()
statObj.sort_stats("time")
statObj.print_stats()
statObj.print_callees()
statObj.print_callers()
sys.stdout.flush()
sys.stdout.close()
del statObj
sys.stdout = priorStdOut
os.remove(statsFile)
return
'''
startedAt = time.time()
self.blockMenuEvents += 1
if newInstance is not None:
self.modelXbrl = newInstance # a save operation has created a new instance to use subsequently
clearZchoices = False
if viewTblELR: # specific table selection
self.tblELR = viewTblELR
clearZchoices = True
        else: # first or subsequent reloading (language, dimensions, other change)
clearZchoices = self.zOrdinateChoices is None
if clearZchoices: # also need first time initialization
            self.loadTablesMenu() # load menus (and initialize if first time)
viewTblELR = self.tblELR
if not self.tblELR:
return # no table to display
if clearZchoices:
self.zOrdinateChoices = {}
# remove old widgets
self.viewFrame.clearGrid()
tblAxisRelSet, xTopStructuralNode, yTopStructuralNode, zTopStructuralNode = resolveAxesStructure(self, viewTblELR)
self.hasTableFilters = bool(self.modelTable.filterRelationships)
if tblAxisRelSet:
# review row header wrap widths and limit to 2/3 of the frame width (all are screen units)
fontWidth = tkFont.Font(font='TkTextFont').configure()['size']
fontWidth = fontWidth * 3 // 2
dataColsAllowanceWidth = (fontWidth * ENTRY_WIDTH_IN_CHARS + PADDING) * self.dataCols + PADDING
frameWidth = self.viewFrame.winfo_width()
if dataColsAllowanceWidth + self.rowHdrWrapLength > frameWidth:
if dataColsAllowanceWidth > frameWidth / 2:
rowHdrAllowanceWidth = frameWidth / 2
else:
rowHdrAllowanceWidth = frameWidth - dataColsAllowanceWidth
if self.rowHdrWrapLength > rowHdrAllowanceWidth:
widthRatio = rowHdrAllowanceWidth / self.rowHdrWrapLength
self.rowHdrWrapLength = rowHdrAllowanceWidth
fixedWidth = sum(w for w in self.rowHdrColWidth if w <= RENDER_UNITS_PER_CHAR)
adjustableWidth = sum(w for w in self.rowHdrColWidth if w > RENDER_UNITS_PER_CHAR)
                    if adjustableWidth > 0:
widthRatio = (rowHdrAllowanceWidth - fixedWidth) / adjustableWidth
for i in range(len(self.rowHdrColWidth)):
w = self.rowHdrColWidth[i]
if w > RENDER_UNITS_PER_CHAR:
self.rowHdrColWidth[i] = int(w * widthRatio)
self.aspectEntryObjectIdsNode.clear()
self.aspectEntryObjectIdsCell.clear()
self.factPrototypeAspectEntryObjectIds.clear()
#print("tbl hdr width rowHdrCols {0}".format(self.rowHdrColWidth))
self.gridTblHdr.tblHdrWraplength = 200 # to adjust dynamically during configure callbacks
self.gridTblHdr.tblHdrLabel = \
gridHdr(self.gridTblHdr, 0, 0,
(self.modelTable.genLabel(lang=self.lang, strip=True) or # use table label, if any
self.roledefinition),
anchor="nw",
#columnspan=(self.dataFirstCol - 1),
#rowspan=(self.dataFirstRow),
wraplength=200) # in screen units
#wraplength=sum(self.rowHdrColWidth)) # in screen units
self.zAspectStructuralNodes = defaultdict(set)
self.zAxis(1, zTopStructuralNode, clearZchoices)
xStructuralNodes = []
self.xAxis(self.dataFirstCol, self.colHdrTopRow, self.colHdrTopRow + self.colHdrRows - 1,
xTopStructuralNode, xStructuralNodes, self.xAxisChildrenFirst.get(), True, True)
self.yAxis(1, self.dataFirstRow,
yTopStructuralNode, self.yAxisChildrenFirst.get(), True, True)
for fp in self.factPrototypes: # dereference prior facts
if fp is not None:
fp.clear()
self.factPrototypes = []
self.bodyCells(self.dataFirstRow, yTopStructuralNode, xStructuralNodes, self.zAspectStructuralNodes, self.yAxisChildrenFirst.get())
# data cells
#print("body cells done")
self.modelXbrl.profileStat("viewTable_" + os.path.basename(viewTblELR), time.time() - startedAt)
#self.gridView.config(scrollregion=self.gridView.bbox(constants.ALL))
self.blockMenuEvents -= 1
def zAxis(self, row, zStructuralNode, clearZchoices):
if zStructuralNode is not None:
gridBorder(self.gridColHdr, self.dataFirstCol, row, TOPBORDER, columnspan=2)
gridBorder(self.gridColHdr, self.dataFirstCol, row, LEFTBORDER)
gridBorder(self.gridColHdr, self.dataFirstCol, row, RIGHTBORDER, columnspan=2)
label = zStructuralNode.header(lang=self.lang)
hdr = gridHdr(self.gridColHdr, self.dataFirstCol, row,
label,
anchor="w", columnspan=2,
wraplength=200, # in screen units
objectId=zStructuralNode.objectId(),
onClick=self.onClick)
if zStructuralNode.choiceStructuralNodes is not None: # combo box
valueHeaders = [''.ljust(zChoiceStructuralNode.indent * 4) + # indent if nested choices
(zChoiceStructuralNode.header(lang=self.lang) or '')
for zChoiceStructuralNode in zStructuralNode.choiceStructuralNodes]
zAxisIsOpenExplicitDimension = False
zAxisTypedDimension = None
i = zStructuralNode.choiceNodeIndex # for aspect entry, use header selected
comboBoxValue = None if i >= 0 else zStructuralNode.aspects.get('aspectValueLabel')
chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
aspect = None
for aspect in chosenStructuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
# for open filter nodes of explicit dimension allow selection of all values
zAxisAspectEntryMode = False
if isinstance(chosenStructuralNode.definitionNode, ModelFilterDefinitionNode):
if isinstance(aspect, QName):
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
if len(valueHeaders) != 1 or valueHeaders[0]: # not just a blank initial entry
valueHeaders.append("(all members)")
else:
valueHeaders.extend(
self.explicitDimensionFilterMembers(zStructuralNode, chosenStructuralNode))
zAxisAspectEntryMode = True
zAxisIsOpenExplicitDimension = True
elif dimConcept.isTypedDimension:
if (zStructuralNode.choiceStructuralNodes[0].contextItemBinding is None and
not valueHeaders[0]): # remove filterNode from the list
''' this isn't reliable
if i > 0:
del zStructuralNode.choiceStructuralNodes[0]
del valueHeaders[0]
zStructuralNode.choiceNodeIndex = i = i-1
'''
if i >= 0:
chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
else:
chosenStructuralNode = zStructuralNode # use aspects of structural node (for entered typed value)
if not comboBoxValue and not valueHeaders:
comboBoxValue = "--please select--"
i = -1
valueHeaders.append("(enter typed member)")
zAxisTypedDimension = dimConcept
combobox = gridCombobox(
self.gridColHdr, self.dataFirstCol + 2, row,
values=valueHeaders,
value=comboBoxValue,
selectindex=zStructuralNode.choiceNodeIndex if i >= 0 else None,
columnspan=2,
state=["readonly"],
comboboxselected=self.onZComboBoxSelected)
combobox.zStructuralNode = zStructuralNode
combobox.zAxisIsOpenExplicitDimension = zAxisIsOpenExplicitDimension
combobox.zAxisTypedDimension = zAxisTypedDimension
combobox.zAxisAspectEntryMode = zAxisAspectEntryMode
combobox.zAxisAspect = aspect
combobox.zChoiceOrdIndex = row - 1
combobox.objectId = hdr.objectId = zStructuralNode.objectId()
gridBorder(self.gridColHdr, self.dataFirstCol + 3, row, RIGHTBORDER)
# add aspect for chosen node
self.setZStructuralNodeAspects(chosenStructuralNode)
else:
#process aspect on this node before child nodes in case it is overridden
self.setZStructuralNodeAspects(zStructuralNode)
# nested nodes override parent nodes
for zStructuralNode in zStructuralNode.childStructuralNodes:
self.zAxis(row + 1, zStructuralNode, clearZchoices)
def setZStructuralNodeAspects(self, zStructuralNode, add=True):
for aspect in aspectModels[self.aspectModel]:
if (aspect in zStructuralNode.aspects or # might be added as custom-entered value (typed dim)
zStructuralNode.hasAspect(aspect, inherit=True)): #implies inheriting from other z axes
if aspect == Aspect.DIMENSIONS:
for dim in (zStructuralNode.aspectValue(Aspect.DIMENSIONS, inherit=True) or emptyList):
if add:
self.zAspectStructuralNodes[dim].add(zStructuralNode)
else:
self.zAspectStructuralNodes[dim].discard(zStructuralNode)
else:
if add:
self.zAspectStructuralNodes[aspect].add(zStructuralNode)
else:
self.zAspectStructuralNodes[aspect].discard(zStructuralNode)
def onZComboBoxSelected(self, event):
if self.hasChangesToSave():
import tkinter.messagebox
reply = tkinter.messagebox.askyesnocancel(
_("arelle - Unsaved Changes"),
_("Save unsaved changes before Z-axis change? \n(No will discard changes.)"),
parent=self.tabWin)
if reply is None:
return # cancel
if reply: # yes
self.saveInstance(onSaved=lambda: self.onZComboBoxSelected(event))
return # called again after saving on ui foreground thread
combobox = event.widget
structuralNode = combobox.zStructuralNode
if combobox.zAxisAspectEntryMode:
aspectValue = structuralNode.aspectEntryHeaderValues.get(combobox.value)
if aspectValue is not None:
self.zOrdinateChoices[combobox.zStructuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue, 'aspectValueLabel': combobox.value}
self.view() # redraw grid
elif combobox.zAxisIsOpenExplicitDimension and combobox.value == "(all members)":
# reload combo box
self.comboboxLoadExplicitDimension(combobox,
structuralNode, # owner of combobox
structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex]) # aspect filter node
structuralNode.choiceNodeIndex = -1 # use entry aspect value
combobox.zAxisAspectEntryMode = True
elif combobox.zAxisTypedDimension is not None and combobox.value == "(enter typed member)":
# ask typed member entry
import tkinter.simpledialog
result = tkinter.simpledialog.askstring(_("Enter new typed dimension value"),
combobox.zAxisTypedDimension.label(),
parent=self.tabWin)
if result:
structuralNode.choiceNodeIndex = -1 # use entry aspect value
aspectValue = FunctionXfi.create_element(self.rendrCntx,
None,
(combobox.zAxisTypedDimension.typedDomainElement.qname, (), result))
self.zOrdinateChoices[combobox.zStructuralNode.definitionNode] = \
structuralNode.aspects = {combobox.zAxisAspect: aspectValue,
Aspect.DIMENSIONS: {combobox.zAxisTypedDimension.qname},
'aspectValueLabel': result}
if not hasattr(structuralNode, "aspectEntryHeaderValues"): structuralNode.aspectEntryHeaderValues = {}
structuralNode.aspectEntryHeaderValues[result] = aspectValue
valueHeaders = list(combobox["values"])
if result not in valueHeaders: valueHeaders.insert(0, result)
combobox["values"] = valueHeaders
combobox.zAxisAspectEntryMode = True
self.view() # redraw grid
else:
# remove prior combo choice aspect
self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex], add=False)
i = combobox.valueIndex
self.zOrdinateChoices[combobox.zStructuralNode.definitionNode] = structuralNode.choiceNodeIndex = i
# set current combo choice aspect
self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[i])
self.view() # redraw grid
def xAxis(self, leftCol, topRow, rowBelow, xParentStructuralNode, xStructuralNodes, childrenFirst, renderNow, atTop):
if xParentStructuralNode is not None:
parentRow = rowBelow
noDescendants = True
rightCol = leftCol
widthToSpanParent = 0
sideBorder = not xStructuralNodes
if atTop and sideBorder and childrenFirst:
gridBorder(self.gridColHdr, self.dataFirstCol, 1, LEFTBORDER, rowspan=self.dataFirstRow)
for xStructuralNode in xParentStructuralNode.childStructuralNodes:
if not xStructuralNode.isRollUp:
noDescendants = False
rightCol, row, width, leafNode = self.xAxis(leftCol, topRow + 1, rowBelow, xStructuralNode, xStructuralNodes, # nested items before totals
childrenFirst, childrenFirst, False)
if row - 1 < parentRow:
parentRow = row - 1
#if not leafNode:
# rightCol -= 1
isLabeled = xStructuralNode.isLabeled
nonAbstract = not xStructuralNode.isAbstract and isLabeled
if nonAbstract and isLabeled:
width += 100 # width for this label, in screen units
widthToSpanParent += width
if childrenFirst:
thisCol = rightCol
sideBorder = RIGHTBORDER
else:
thisCol = leftCol
sideBorder = LEFTBORDER
if renderNow and isLabeled:
columnspan = (rightCol - leftCol + (1 if nonAbstract else 0))
gridBorder(self.gridColHdr, leftCol, topRow, TOPBORDER, columnspan=columnspan)
gridBorder(self.gridColHdr, leftCol, topRow,
sideBorder, columnspan=columnspan,
rowspan=(rowBelow - topRow + 1) )
label = xStructuralNode.header(lang=self.lang,
returnGenLabel=isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))
gridHdr(self.gridColHdr, leftCol, topRow,
label if label else " ",
anchor="center",
columnspan=(rightCol - leftCol + (1 if nonAbstract else 0)),
rowspan=(row - topRow + 1) if leafNode else 1,
wraplength=width, # screen units
objectId=xStructuralNode.objectId(),
onClick=self.onClick)
if nonAbstract:
for i, role in enumerate(self.colHdrNonStdRoles):
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - len(self.colHdrNonStdRoles) + i, TOPBORDER)
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - len(self.colHdrNonStdRoles) + i, sideBorder)
gridHdr(self.gridColHdr, thisCol, self.dataFirstRow - len(self.colHdrNonStdRoles) + i,
xStructuralNode.header(role=role, lang=self.lang),
anchor="center",
wraplength=100, # screen units
objectId=xStructuralNode.objectId(),
onClick=self.onClick)
''' was
if self.colHdrDocRow:
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - 1 - self.rowHdrCodeCol, TOPBORDER)
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - 1 - self.rowHdrCodeCol, sideBorder)
gridHdr(self.gridColHdr, thisCol, self.dataFirstRow - 1 - self.rowHdrCodeCol,
xStructuralNode.header(role="http://www.xbrl.org/2008/role/documentation",
lang=self.lang),
anchor="center",
wraplength=100, # screen units
objectId=xStructuralNode.objectId(),
onClick=self.onClick)
if self.colHdrCodeRow:
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - 1, TOPBORDER)
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - 1, sideBorder)
gridHdr(self.gridColHdr, thisCol, self.dataFirstRow - 1,
xStructuralNode.header(role="http://www.eurofiling.info/role/2010/coordinate-code"),
anchor="center",
wraplength=100, # screen units
objectId=xStructuralNode.objectId(),
onClick=self.onClick)
'''
gridBorder(self.gridColHdr, thisCol, self.dataFirstRow - 1, BOTTOMBORDER)
xStructuralNodes.append(xStructuralNode)
if nonAbstract:
rightCol += 1
if renderNow and not childrenFirst:
self.xAxis(leftCol + (1 if nonAbstract else 0), topRow + 1, rowBelow, xStructuralNode, xStructuralNodes, childrenFirst, True, False) # render on this pass
leftCol = rightCol
if atTop and sideBorder and not childrenFirst:
gridBorder(self.gridColHdr, rightCol - 1, 1, RIGHTBORDER, rowspan=self.dataFirstRow)
return (rightCol, parentRow, widthToSpanParent, noDescendants)
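    # xAxis returns (rightCol, parentRow, widthToSpanParent, noDescendants) so
    # that the recursive caller can span its header across its children's
    # columns and detect leaf nodes when computing rowspans.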
def yAxis(self, leftCol, row, yParentStructuralNode, childrenFirst, renderNow, atLeft):
if yParentStructuralNode is not None:
nestedBottomRow = row
if atLeft:
gridBorder(self.gridRowHdr, self.rowHdrCols + len(self.rowHdrNonStdRoles), # was: self.rowHdrDocCol + self.rowHdrCodeCol,
self.dataFirstRow,
RIGHTBORDER,
rowspan=self.dataRows)
gridBorder(self.gridRowHdr, 1, self.dataFirstRow + self.dataRows - 1,
BOTTOMBORDER,
columnspan=(self.rowHdrCols + len(self.rowHdrNonStdRoles))) # was: self.rowHdrDocCol + self.rowHdrCodeCol))
for yStructuralNode in yParentStructuralNode.childStructuralNodes:
if not yStructuralNode.isRollUp:
isAbstract = (yStructuralNode.isAbstract or
(yStructuralNode.childStructuralNodes and
not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
isNonAbstract = not isAbstract
isLabeled = yStructuralNode.isLabeled
nestRow, nextRow = self.yAxis(leftCol + isLabeled, row, yStructuralNode, # nested items before totals
childrenFirst, childrenFirst, False)
topRow = row
if childrenFirst and isNonAbstract:
row = nextRow
if renderNow and isLabeled:
columnspan = self.rowHdrCols - leftCol + 1 if isNonAbstract or nextRow == row else None
gridBorder(self.gridRowHdr, leftCol, topRow, LEFTBORDER,
rowspan=(nestRow - topRow + 1) )
gridBorder(self.gridRowHdr, leftCol, topRow, TOPBORDER,
columnspan=(1 if childrenFirst and nextRow > row else columnspan))
if childrenFirst and row > topRow:
gridBorder(self.gridRowHdr, leftCol + 1, row, TOPBORDER,
columnspan=(self.rowHdrCols - leftCol))
depth = yStructuralNode.depth
wraplength = (self.rowHdrColWidth[depth] if isAbstract else
self.rowHdrWrapLength - sum(self.rowHdrColWidth[0:depth]))
if wraplength < 0:
wraplength = self.rowHdrColWidth[depth]
label = yStructuralNode.header(lang=self.lang,
returnGenLabel=isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)),
recurseParent=not isinstance(yStructuralNode.definitionNode, ModelFilterDefinitionNode))
if label != OPEN_ASPECT_ENTRY_SURROGATE:
gridHdr(self.gridRowHdr, leftCol, row,
label if label is not None else " ",
anchor=("w" if isNonAbstract or nestRow == row else "center"),
columnspan=columnspan,
rowspan=(nestRow - row if isAbstract else None),
# wraplength is in screen units
wraplength=wraplength,
#minwidth=self.rowHdrColWidth[leftCol],
minwidth=(RENDER_UNITS_PER_CHAR if isNonAbstract and nextRow > topRow else None),
objectId=yStructuralNode.objectId(),
onClick=self.onClick)
else:
self.aspectEntryObjectIdsNode[yStructuralNode.aspectEntryObjectId] = yStructuralNode
self.aspectEntryObjectIdsCell[yStructuralNode.aspectEntryObjectId] = gridCombobox(
self.gridRowHdr, leftCol, row,
values=self.aspectEntryValues(yStructuralNode),
width=int(max(wraplength/RENDER_UNITS_PER_CHAR, 5)), # width is in characters, not screen units
objectId=yStructuralNode.aspectEntryObjectId,
comboboxselected=self.onAspectComboboxSelection)
if isNonAbstract:
for i, role in enumerate(self.rowHdrNonStdRoles):
isCode = "code" in role
docCol = self.dataFirstCol - len(self.rowHdrNonStdRoles) + i
gridBorder(self.gridRowHdr, docCol, row, TOPBORDER)
gridBorder(self.gridRowHdr, docCol, row, LEFTBORDER)
gridHdr(self.gridRowHdr, docCol, row,
yStructuralNode.header(role=role, lang=self.lang),
anchor="c" if isCode else "w",
wraplength=40 if isCode else 100, # screen units
objectId=yStructuralNode.objectId(),
onClick=self.onClick)
''' was:
if self.rowHdrDocCol:
docCol = self.dataFirstCol - 1 - self.rowHdrCodeCol
gridBorder(self.gridRowHdr, docCol, row, TOPBORDER)
gridBorder(self.gridRowHdr, docCol, row, LEFTBORDER)
gridHdr(self.gridRowHdr, docCol, row,
yStructuralNode.header(role="http://www.xbrl.org/2008/role/documentation",
lang=self.lang),
anchor="w",
wraplength=100, # screen units
objectId=yStructuralNode.objectId(),
onClick=self.onClick)
if self.rowHdrCodeCol:
codeCol = self.dataFirstCol - 1
gridBorder(self.gridRowHdr, codeCol, row, TOPBORDER)
gridBorder(self.gridRowHdr, codeCol, row, LEFTBORDER)
gridHdr(self.gridRowHdr, codeCol, row,
yStructuralNode.header(role="http://www.eurofiling.info/role/2010/coordinate-code"),
anchor="center",
wraplength=40, # screen units
objectId=yStructuralNode.objectId(),
onClick=self.onClick)
# gridBorder(self.gridRowHdr, leftCol, self.dataFirstRow - 1, BOTTOMBORDER)
'''
if isNonAbstract:
row += 1
elif childrenFirst:
row = nextRow
if nestRow > nestedBottomRow:
nestedBottomRow = nestRow + (isNonAbstract and not childrenFirst)
if row > nestedBottomRow:
nestedBottomRow = row
#if renderNow and not childrenFirst:
# dummy, row = self.yAxis(leftCol + 1, row, yStructuralNode, childrenFirst, True, False) # render on this pass
if not childrenFirst:
dummy, row = self.yAxis(leftCol + isLabeled, row, yStructuralNode, childrenFirst, renderNow, False) # render on this pass
return (nestedBottomRow, row)
def bodyCells(self, row, yParentStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst):
if yParentStructuralNode is not None:
dimDefaults = self.modelXbrl.qnameDimensionDefaults
for yStructuralNode in yParentStructuralNode.childStructuralNodes:
if yChildrenFirst:
row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
if not (yStructuralNode.isAbstract or
(yStructuralNode.childStructuralNodes and
not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))) and yStructuralNode.isLabeled:
isEntryPrototype = yStructuralNode.isEntryPrototype(default=False) # row to enter open aspects
yAspectStructuralNodes = defaultdict(set)
for aspect in aspectModels[self.aspectModel]:
if yStructuralNode.hasAspect(aspect):
if aspect == Aspect.DIMENSIONS:
for dim in (yStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
yAspectStructuralNodes[dim].add(yStructuralNode)
else:
yAspectStructuralNodes[aspect].add(yStructuralNode)
yTagSelectors = yStructuralNode.tagSelectors
gridSpacer(self.gridBody, self.dataFirstCol, row, LEFTBORDER)
# data for columns of row
#print ("row " + str(row) + "yNode " + yStructuralNode.definitionNode.objectId() )
ignoreDimValidity = self.ignoreDimValidity.get()
for i, xStructuralNode in enumerate(xStructuralNodes):
xAspectStructuralNodes = defaultdict(set)
for aspect in aspectModels[self.aspectModel]:
if xStructuralNode.hasAspect(aspect):
if aspect == Aspect.DIMENSIONS:
for dim in (xStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
xAspectStructuralNodes[dim].add(xStructuralNode)
else:
xAspectStructuralNodes[aspect].add(xStructuralNode)
cellTagSelectors = yTagSelectors | xStructuralNode.tagSelectors
cellAspectValues = {}
matchableAspects = set()
for aspect in _DICT_SET(xAspectStructuralNodes.keys()) | _DICT_SET(yAspectStructuralNodes.keys()) | _DICT_SET(zAspectStructuralNodes.keys()):
aspectValue = xStructuralNode.inheritedAspectValue(yStructuralNode,
self, aspect, cellTagSelectors,
xAspectStructuralNodes, yAspectStructuralNodes, zAspectStructuralNodes)
# value is None for a dimension whose value is to be not reported in this slice
if (isinstance(aspect, _INT) or # not a dimension
dimDefaults.get(aspect) != aspectValue or # explicit dim defaulted will equal the value
aspectValue is not None): # typed dim absent will be none
cellAspectValues[aspect] = aspectValue
matchableAspects.add(aspectModelAspect.get(aspect,aspect)) #filterable aspect from rule aspect
cellDefaultedDims = _DICT_SET(dimDefaults) - _DICT_SET(cellAspectValues.keys())
priItemQname = cellAspectValues.get(Aspect.CONCEPT)
concept = self.modelXbrl.qnameConcepts.get(priItemQname)
conceptNotAbstract = concept is None or not concept.isAbstract
from arelle.ValidateXbrlDimensions import isFactDimensionallyValid
value = None
objectId = None
justify = None
fp = FactPrototype(self, cellAspectValues)
if conceptNotAbstract:
# reduce set of matchable facts to those with pri item qname and have dimension aspects
facts = self.modelXbrl.factsByQname[priItemQname] if priItemQname else self.modelXbrl.factsInInstance
if self.hasTableFilters:
facts = self.modelTable.filterFacts(self.rendrCntx, facts)
for aspect in matchableAspects: # trim down facts with explicit dimensions match or just present
if isinstance(aspect, QName):
aspectValue = cellAspectValues.get(aspect, None)
if isinstance(aspectValue, ModelDimensionValue):
if aspectValue.isExplicit:
dimMemQname = aspectValue.memberQname # match facts with this explicit value
else:
dimMemQname = None # match facts that report this dimension
elif isinstance(aspectValue, QName):
dimMemQname = aspectValue # match facts that have this explicit value
elif aspectValue is None: # match typed dims that don't report this value
dimMemQname = ModelXbrl.DEFAULT
else:
dimMemQname = None # match facts that report this dimension
facts = facts & self.modelXbrl.factsByDimMemQname(aspect, dimMemQname)
for fact in facts:
if (all(aspectMatches(self.rendrCntx, fact, fp, aspect)
for aspect in matchableAspects) and
all(fact.context.dimMemberQname(dim,includeDefaults=True) in (dimDefaults[dim], None)
for dim in cellDefaultedDims)):
if yStructuralNode.hasValueExpression(xStructuralNode):
value = yStructuralNode.evalValueExpression(fact, xStructuralNode)
else:
value = fact.effectiveValue
objectId = fact.objectId()
justify = "right" if fact.isNumeric else "left"
break
if (conceptNotAbstract and
(value is not None or ignoreDimValidity or isFactDimensionallyValid(self, fp) or
isEntryPrototype)):
if objectId is None:
objectId = "f{0}".format(len(self.factPrototypes))
self.factPrototypes.append(fp) # for property views
for aspect, aspectValue in cellAspectValues.items():
if isinstance(aspectValue, str) and aspectValue.startswith(OPEN_ASPECT_ENTRY_SURROGATE):
self.factPrototypeAspectEntryObjectIds[objectId].add(aspectValue)
gridCell(self.gridBody, self.dataFirstCol + i, row, value,
justify=(justify or ("right" if fp.isNumeric else "left")),
width=ENTRY_WIDTH_IN_CHARS, # width is in characters, not screen units
objectId=objectId, onClick=self.onClick)
else:
fp.clear() # dereference
gridSpacer(self.gridBody, self.dataFirstCol + i, row, CENTERCELL)
gridSpacer(self.gridBody, self.dataFirstCol + i, row, RIGHTBORDER)
gridSpacer(self.gridBody, self.dataFirstCol + i, row, BOTTOMBORDER)
row += 1
if not yChildrenFirst:
row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
return row
def onClick(self, event):
try:
objId = event.widget.objectId
if objId and objId[0] == "f":
viewableObject = self.factPrototypes[int(objId[1:])]
else:
viewableObject = objId
self.modelXbrl.viewModelObject(viewableObject)
except AttributeError: # not clickable
pass
self.modelXbrl.modelManager.cntlr.currentView = self
def cellEnter(self, *args):
self.blockSelectEvent = 0
self.modelXbrl.modelManager.cntlr.currentView = self
def cellLeave(self, *args):
self.blockSelectEvent = 1
def cellSelect(self, *args):
if self.blockSelectEvent == 0 and self.blockViewModelObject == 0:
self.blockViewModelObject += 1
#self.modelXbrl.viewModelObject(self.nodeToObjectId[self.treeView.selection()[0]])
#self.modelXbrl.viewModelObject(self.treeView.selection()[0])
self.blockViewModelObject -= 1
def viewModelObject(self, modelObject):
if self.blockViewModelObject == 0:
self.blockViewModelObject += 1
try:
if isinstance(modelObject, ModelDtsObject.ModelRelationship):
objectId = modelObject.toModelObject.objectId()
else:
objectId = modelObject.objectId()
if objectId in self.tablesToELR:
self.view(viewTblELR=self.tablesToELR[objectId])
except (KeyError, AttributeError):
pass
self.blockViewModelObject -= 1
def onConfigure(self, event, *args):
if not self.blockMenuEvents:
lastFrameWidth = getattr(self, "lastFrameWidth", 0)
frameWidth = self.tabWin.winfo_width()
if lastFrameWidth != frameWidth:
self.updateInstanceFromFactPrototypes()
self.lastFrameWidth = frameWidth
if lastFrameWidth:
# frame resized, recompute row header column widths and lay out table columns
"""
def sleepAndReload():
time.sleep(.75)
self.viewReloadDueToMenuAction()
self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((sleepAndReload, []))
"""
#self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((self.viewReloadDueToMenuAction, []))
def deferredReload():
self.deferredReloadCount -= 1 # only do reload after all queued reload timers expire
if self.deferredReloadCount <= 0:
self.viewReloadDueToMenuAction()
self.deferredReloadCount = getattr(self, "deferredReloadCount", 0) + 1
self.viewFrame.after(1500, deferredReload)
def onQuitView(self, event, *args):
self.updateInstanceFromFactPrototypes()
def hasChangesToSave(self):
for bodyCell in self.gridRowHdr.winfo_children():
if isinstance(bodyCell, (gridCell,gridCombobox)) and bodyCell.isChanged:
return True
for bodyCell in self.gridBody.winfo_children():
if isinstance(bodyCell, gridCell) and bodyCell.isChanged:
return True
return False
def updateInstanceFromFactPrototypes(self):
# Only update the model if it already exists
if self.modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
instance = self.modelXbrl
newCntx = ModelXbrl.AUTO_LOCATE_ELEMENT
newUnit = ModelXbrl.AUTO_LOCATE_ELEMENT
# check user keyed changes to aspects
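# objectId prefix conventions (as used below, inferred from this method): ids starting
# with OPEN_ASPECT_ENTRY_SURROGATE mark open-aspect entry widgets, "f" marks an unsaved
# fact prototype indexed into self.factPrototypes, and ids starting with "a" (aspect
# widgets) are skipped when writing facts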
aspectEntryChanges = {} # index = widget ID, value = widget contents
for bodyCell in self.gridRowHdr.winfo_children():
if isinstance(bodyCell, (gridCell,gridCombobox)) and bodyCell.isChanged:
objId = bodyCell.objectId
if objId:
if objId[0] == OPEN_ASPECT_ENTRY_SURROGATE:
bodyCell.isChanged = False # clear change flag
aspectEntryChanges[objId] = bodyCell.value
aspectEntryChangeIds = _DICT_SET(aspectEntryChanges.keys())
# check user keyed changes to facts
for bodyCell in self.gridBody.winfo_children():
if isinstance(bodyCell, gridCell) and bodyCell.isChanged:
value = bodyCell.value
objId = bodyCell.objectId
if objId:
if (objId[0] == "f" and
(bodyCell.isChanged or # change in fact value widget or any open aspect widget
self.factPrototypeAspectEntryObjectIds[objId] & aspectEntryChangeIds)):
factPrototypeIndex = int(objId[1:])
factPrototype = self.factPrototypes[factPrototypeIndex]
concept = factPrototype.concept
entityIdentScheme = self.newFactItemOptions.entityIdentScheme
entityIdentValue = self.newFactItemOptions.entityIdentValue
periodType = factPrototype.concept.periodType
periodStart = self.newFactItemOptions.startDateDate if periodType == "duration" else None
periodEndInstant = self.newFactItemOptions.endDateDate
qnameDims = factPrototype.context.qnameDims
qnameDims.update(self.newFactOpenAspects(objId))
# open aspects widgets
prevCntx = instance.matchContext(
entityIdentScheme, entityIdentValue, periodType, periodStart, periodEndInstant,
qnameDims, [], [])
if prevCntx is not None:
cntxId = prevCntx.id
else: # need new context
newCntx = instance.createContext(entityIdentScheme, entityIdentValue,
periodType, periodStart, periodEndInstant,
concept.qname, qnameDims, [], [],
afterSibling=newCntx)
cntxId = newCntx.id
# new context
if concept.isNumeric:
if concept.isMonetary:
unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
decimals = self.newFactItemOptions.monetaryDecimals
elif concept.isShares:
unitMeasure = XbrlConst.qnXbrliShares
decimals = self.newFactItemOptions.nonMonetaryDecimals
else:
unitMeasure = XbrlConst.qnXbrliPure
decimals = self.newFactItemOptions.nonMonetaryDecimals
prevUnit = instance.matchUnit([unitMeasure],[])
if prevUnit is not None:
unitId = prevUnit.id
else:
newUnit = instance.createUnit([unitMeasure],[], afterSibling=newUnit)
unitId = newUnit.id
attrs = [("contextRef", cntxId)]
if concept.isNumeric:
attrs.append(("unitRef", unitId))
attrs.append(("decimals", decimals))
value = Locale.atof(self.modelXbrl.locale, value, str.strip)
newFact = instance.createFact(concept.qname, attributes=attrs, text=value)
bodyCell.objectId = newFact.objectId() # switch cell to now use fact ID
if self.factPrototypes[factPrototypeIndex] is not None:
self.factPrototypes[factPrototypeIndex].clear()
self.factPrototypes[factPrototypeIndex] = None #dereference fact prototype
bodyCell.isChanged = False # clear change flag
elif objId[0] != "a": # instance fact, not prototype
fact = self.modelXbrl.modelObject(objId)
if fact.concept.isNumeric:
value = Locale.atof(self.modelXbrl.locale, value, str.strip)
if fact.value != value:
if fact.concept.isNumeric and fact.isNil != (not value):
fact.isNil = not value
if value: # had been nil, now it needs decimals
fact.decimals = (self.newFactItemOptions.monetaryDecimals
if fact.concept.isMonetary else
self.newFactItemOptions.nonMonetaryDecimals)
fact.text = value
XmlValidate.validate(instance, fact)
bodyCell.isChanged = False # clear change flag
def saveInstance(self, newFilename=None, onSaved=None):
if (not self.newFactItemOptions.entityIdentScheme or # not initialized yet
not self.newFactItemOptions.entityIdentValue or
not self.newFactItemOptions.startDateDate or not self.newFactItemOptions.endDateDate):
if not getNewFactItemOptions(self.modelXbrl.modelManager.cntlr, self.newFactItemOptions):
return # new instance not set
# newFilename = None # only used when a new instance must be created
if self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE and newFilename is None:
newFilename = self.modelXbrl.modelManager.cntlr.fileSave(view=self, fileType="xbrl")
if not newFilename:
return # saving cancelled
# continue saving in background
thread = threading.Thread(target=lambda: self.backgroundSaveInstance(newFilename, onSaved))
thread.daemon = True
thread.start()
def backgroundSaveInstance(self, newFilename=None, onSaved=None):
cntlr = self.modelXbrl.modelManager.cntlr
if newFilename and self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE:
self.modelXbrl.modelManager.showStatus(_("creating new instance {0}").format(os.path.basename(newFilename)))
self.modelXbrl.modelManager.cntlr.waitForUiThreadQueue() # force status update
self.modelXbrl.createInstance(newFilename) # creates an instance as this modelXbrl's entrypoint
instance = self.modelXbrl
cntlr.showStatus(_("Saving {0}").format(instance.modelDocument.basename))
cntlr.waitForUiThreadQueue() # force status update
self.updateInstanceFromFactPrototypes()
instance.saveInstance(newFilename) # may override prior filename for instance from main menu
cntlr.showStatus(_("Saved {0}").format(instance.modelDocument.basename), clearAfter=3000)
if onSaved is not None:
self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((onSaved, []))
def newFactOpenAspects(self, factObjectId):
aspectValues = {}
for aspectObjId in self.factPrototypeAspectEntryObjectIds[factObjectId]:
structuralNode = self.aspectEntryObjectIdsNode[aspectObjId]
for aspect in structuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
gridCell = self.aspectEntryObjectIdsCell[aspectObjId]
value = gridCell.value
# is aspect in a childStructuralNode?
if value:
aspectValue = structuralNode.aspectEntryHeaderValues.get(value)
if aspectValue is None: # try converting value
if isinstance(aspect, QName): # dimension
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
# value must be qname
aspectValue = None # need to find member for the description
else:
typedDimElement = dimConcept.typedDomainElement
aspectValue = FunctionXfi.create_element(
self.rendrCntx, None, (typedDimElement.qname, (), value))
if aspectValue is not None:
aspectValues[aspect] = aspectValue
return aspectValues
def aspectEntryValues(self, structuralNode):
for aspect in structuralNode.aspectsCovered():
if aspect != Aspect.DIMENSIONS:
break
# return all header values for the covered aspect as a sorted list; also
# cache each header label's aspect value in structuralNode.aspectEntryHeaderValues
depth = 0
n = structuralNode
while (n.parentStructuralNode is not None):
depth += 1
root = n = n.parentStructuralNode
headers = set()
headerValues = {}
def getHeaders(n, d):
for childStructuralNode in n.childStructuralNodes:
if d == depth:
h = childStructuralNode.header(lang=self.lang,
returnGenLabel=False,
returnMsgFormatString=False)
if not childStructuralNode.isEntryPrototype() and h:
headerValues[h] = childStructuralNode.aspectValue(aspect)
headers.add(h)
else:
getHeaders(childStructuralNode, d+1)
getHeaders(root, 1)
structuralNode.aspectEntryHeaderValues = headerValues
# is this an explicit dimension, if so add "(all members)" option at end
headersList = sorted(headers)
if isinstance(aspect, QName): # dimension
dimConcept = self.modelXbrl.qnameConcepts[aspect]
if dimConcept.isExplicitDimension:
if headersList: # has entries, add all-members at end
headersList.append("(all members)")
else: # empty list, just add all members anyway
return self.explicitDimensionFilterMembers(structuralNode, structuralNode)
return headersList
def onAspectComboboxSelection(self, event):
gridCombobox = event.widget
if gridCombobox.value == "(all members)":
structuralNode = self.aspectEntryObjectIdsNode[gridCombobox.objectId]
self.comboboxLoadExplicitDimension(gridCombobox, structuralNode, structuralNode)
def comboboxLoadExplicitDimension(self, gridCombobox, structuralNode, structuralNodeWithFilter):
gridCombobox["values"] = self.explicitDimensionFilterMembers(structuralNode, structuralNodeWithFilter)
def explicitDimensionFilterMembers(self, structuralNode, structuralNodeWithFilter):
for aspect in structuralNodeWithFilter.aspectsCovered():
if isinstance(aspect, QName): # dimension
break
valueHeaders = set()
if structuralNode is not None:
headerValues = {}
# check for dimension filter(s)
dimFilterRels = structuralNodeWithFilter.definitionNode.filterRelationships
if dimFilterRels:
for rel in dimFilterRels:
dimFilter = rel.toModelObject
if dimFilter is not None:
for memberModel in dimFilter.memberProgs:
memQname = memberModel.qname
memConcept = self.modelXbrl.qnameConcepts.get(memQname)
if memConcept is not None and (not memberModel.axis or memberModel.axis.endswith('-self')):
header = memConcept.label(lang=self.lang)
valueHeaders.add(header)
if rel.isUsable:
headerValues[header] = memQname
else:
headerValues[header] = memConcept
elif memberModel.axis and memberModel.linkrole and memberModel.arcrole:
# merge of pull request 42 acsone:TABLE_Z_AXIS_DESCENDANT_OR_SELF
if memberModel.axis.endswith('-or-self'):
searchAxis = memberModel.axis[:len(memberModel.axis)-len('-or-self')]
else:
searchAxis = memberModel.axis
relationships = concept_relationships(self.rendrCntx,
None,
(memQname,
memberModel.linkrole,
memberModel.arcrole,
searchAxis),
False) # return flat list
for rel in relationships:
if rel.isUsable:
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
if not valueHeaders:
relationships = concept_relationships(self.rendrCntx,
None,
(aspect,
"XBRL-all-linkroles", # linkrole,
"XBRL-dimensions",
'descendant'),
False) # return flat list
for rel in relationships:
if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
and rel.isUsable):
header = rel.toModelObject.label(lang=self.lang)
valueHeaders.add(header)
headerValues[header] = rel.toModelObject.qname
structuralNode.aspectEntryHeaderValues = headerValues
return sorted(valueHeaders)
# import after other modules resolved to prevent circular references
from arelle.FunctionXfi import concept_relationships
|
dnschef.py
|
#!/usr/bin/env python
#
# DNSChef is a highly configurable DNS Proxy for Penetration Testers
# and Malware Analysts. Please visit http://thesprawl.org/projects/dnschef/
# for the latest version and documentation. Please forward all issues and
# concerns to iphelix [at] thesprawl.org.
DNSCHEF_VERSION = "0.3"
# Copyright (C) 2014 Peter Kacherginsky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from optparse import OptionParser, OptionGroup
from ConfigParser import ConfigParser
from dnslib import *
from IPy import IP
from logservice import LogService
from hostsreader import HostsReader
from embeddedipresolver import EmbeddedIPResolver
import threading, random, operator, time
import SocketServer, socket, sys, os
import binascii
import base64
import traceback
# DNSHandler Mixin. The class contains generic functions to parse DNS requests and
# calculate an appropriate response based on user parameters.
class DNSHandler():
def parse(self, data):
response = ""
try:
# Parse data as DNS
d = DNSRecord.parse(data)
except Exception, e:
print "[%s] %s: ERROR: %s" % (time.strftime("%H:%M:%S"), self.client_address[0], "invalid DNS request")
if self.server.log: self.server.log.write("[%s] %s: ERROR: %s\n" % (
time.strftime("%d/%b/%Y:%H:%M:%S %z"), self.client_address[0], "invalid DNS request"))
else:
# Only Process DNS Queries
if QR[d.header.qr] == "QUERY":
# Gather query parameters
# NOTE: Do not lowercase qname here, because we want to see
# any case request weirdness in the logs.
qname = str(d.q.qname)
# Chop off the last period
if qname[-1] == '.': qname = qname[:-1]
qtype = QTYPE[d.q.qtype]
if self.server.logsvc != None:
# Find subdomain. Note that only the rightmost subdomain is important for logging purposes.
# Note that if subdomain length is less than a preset value in the app server, it will not be logged.
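# Example: for qname "a.b.example.com", labels[:-2] == ['a', 'b'], so
# topsubdomain is 'b' and subs is 'a'.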
labels = qname.split(".")
if len(labels) > 2:
topsubdomain = labels[:-2][-1]
subs = "".join(labels[:-2][:-1])
self.server.logsvc.record_hit(topsubdomain, subs, qtype)
# Find all matching fake DNS records for the query name or get False
fake_records = dict()
for record in self.server.nametodns:
fake_records[record] = self.findnametodns(qname, self.server.nametodns[record],
self.server.embeddedipqnamelist)
# Check if there is a fake record for the current request qtype
if qtype in fake_records and fake_records[qtype]:
fake_record = fake_records[qtype]
# Create a custom response to the query
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
print "[%s] %s: cooking the response of type '%s' for %s to %s" % (
time.strftime("%H:%M:%S"), self.client_address[0], qtype, qname, fake_record)
if self.server.log: self.server.log.write(
"[%s] %s: cooking the response of type '%s' for %s to %s\n" % (
time.strftime("%d/%b/%Y:%H:%M:%S %z"), self.client_address[0], qtype, qname, fake_record))
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
ipv6 = IP(fake_record)
ipv6_bin = ipv6.strBin()
ipv6_hex_tuple = [int(ipv6_bin[i:i + 8], 2) for i in xrange(0, len(ipv6_bin), 8)]
if len(ipv6_hex_tuple) == 16:
response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](ipv6_hex_tuple)))
elif qtype == "SOA":
mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])
# dnslib doesn't like trailing dots
if mname[-1] == ".": mname = mname[:-1]
if rname[-1] == ".": rname = rname[:-1]
response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](mname, rname, times)))
elif qtype == "NAPTR":
order, preference, flags, service, regexp, replacement = fake_record.split(" ")
order = int(order)
preference = int(preference)
# dnslib doesn't like trailing dots
if replacement[-1] == ".": replacement = replacement[:-1]
response.add_answer(RR(qname, getattr(QTYPE, qtype),
rdata=RDMAP[qtype](order, preference, flags, service, regexp,
DNSLabel(replacement))))
elif qtype == "SRV":
priority, weight, port, target = fake_record.split(" ")
priority = int(priority)
weight = int(weight)
port = int(port)
if target[-1] == ".": target = target[:-1]
response.add_answer(
RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](priority, weight, port, target)))
elif qtype == "DNSKEY":
flags, protocol, algorithm, key = fake_record.split(" ")
flags = int(flags)
protocol = int(protocol)
algorithm = int(algorithm)
key = base64.b64decode(("".join(key)).encode('ascii'))
response.add_answer(
RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](flags, protocol, algorithm, key)))
elif qtype == "RRSIG":
covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(
" ")
covered = getattr(QTYPE, covered) # NOTE: Covered QTYPE
algorithm = int(algorithm)
labels = int(labels)
orig_ttl = int(orig_ttl)
sig_exp = int(time.mktime(time.strptime(sig_exp + 'GMT', "%Y%m%d%H%M%S%Z")))
sig_inc = int(time.mktime(time.strptime(sig_inc + 'GMT', "%Y%m%d%H%M%S%Z")))
key_tag = int(key_tag)
if name[-1] == '.': name = name[:-1]
sig = base64.b64decode(("".join(sig)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE, qtype),
rdata=RDMAP[qtype](covered, algorithm, labels, orig_ttl, sig_exp,
sig_inc, key_tag, name, sig)))
else:
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
elif qtype == "*" and None not in fake_records.values():
print "[%s] %s: cooking the response of type '%s' for %s with %s" % (
time.strftime("%H:%M:%S"), self.client_address[0], "ANY", qname, "all known fake records.")
if self.server.log: self.server.log.write(
"[%s] %s: cooking the response of type '%s' for %s with %s\n" % (
time.strftime("%d/%b/%Y:%H:%M:%S %z"), self.client_address[0], "ANY", qname,
"all known fake records."))
response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)
for qtype, fake_record in fake_records.items():
if fake_record:
# NOTE: RDMAP is a dictionary map of qtype strings to handling classses
# IPv6 needs additional work before inclusion:
if qtype == "AAAA":
ipv6 = IP(fake_record)
ipv6_bin = ipv6.strBin()
fake_record = [int(ipv6_bin[i:i + 8], 2) for i in xrange(0, len(ipv6_bin), 8)]
elif qtype == "SOA":
mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])
# dnslib doesn't like trailing dots
if mname[-1] == ".": mname = mname[:-1]
if rname[-1] == ".": rname = rname[:-1]
response.add_answer(
RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](mname, rname, times)))
elif qtype == "NAPTR":
order, preference, flags, service, regexp, replacement = fake_record.split(" ")
order = int(order)
preference = int(preference)
# dnslib doesn't like trailing dots
if replacement and replacement[-1] == ".": replacement = replacement[:-1]
response.add_answer(RR(qname, getattr(QTYPE, qtype),
rdata=RDMAP[qtype](order, preference, flags, service, regexp,
replacement)))
elif qtype == "SRV":
priority, weight, port, target = fake_record.split(" ")
priority = int(priority)
weight = int(weight)
port = int(port)
if target[-1] == ".": target = target[:-1]
response.add_answer(RR(qname, getattr(QTYPE, qtype),
rdata=RDMAP[qtype](priority, weight, port, target)))
elif qtype == "DNSKEY":
flags, protocol, algorithm, key = fake_record.split(" ")
flags = int(flags)
protocol = int(protocol)
algorithm = int(algorithm)
key = base64.b64decode(("".join(key)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE, qtype),
rdata=RDMAP[qtype](flags, protocol, algorithm, key)))
elif qtype == "RRSIG":
covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(
" ")
covered = getattr(QTYPE, covered) # NOTE: Covered QTYPE
algorithm = int(algorithm)
labels = int(labels)
orig_ttl = int(orig_ttl)
sig_exp = int(time.mktime(time.strptime(sig_exp + 'GMT', "%Y%m%d%H%M%S%Z")))
sig_inc = int(time.mktime(time.strptime(sig_inc + 'GMT', "%Y%m%d%H%M%S%Z")))
key_tag = int(key_tag)
if name[-1] == '.': name = name[:-1]
sig = base64.b64decode(("".join(sig)).encode('ascii'))
response.add_answer(RR(qname, getattr(QTYPE, qtype),
rdata=RDMAP[qtype](covered, algorithm, labels, orig_ttl, sig_exp,
sig_inc, key_tag, name, sig)))
else:
# dnslib doesn't like trailing dots
if fake_record[-1] == ".": fake_record = fake_record[:-1]
response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=RDMAP[qtype](fake_record)))
response = response.pack()
# Proxy the request
else:
if self.server.noproxy:
response = None
return
print "[%s] %s: proxying the response of type '%s' for %s" % (
time.strftime("%H:%M:%S"), self.client_address[0], qtype, qname)
if self.server.log: self.server.log.write("[%s] %s: proxying the response of type '%s' for %s\n" % (
time.strftime("%d/%b/%Y:%H:%M:%S %z"), self.client_address[0], qtype, qname))
nameserver_tuple = random.choice(self.server.nameservers).split('#')
response = self.proxyrequest(data, *nameserver_tuple)
return response
# Find the appropriate IP address to use for a queried name. Returns the
# matching host value (which may be False when reverse matching with
# 'truedomains'), or False if no pattern matches.
def findnametodns(self, qname, nametodns, embeddedipqnamelist):
# Make qname case insensitive
qname = qname.lower()
# Split and reverse qname into components for matching.
qnamelist = qname.split('.')
qnamelist.reverse()
# If the qname may embed an IPv4 address (e.g. under the r87.me domain),
# resolve it directly via EmbeddedIPResolver.
if embeddedipqnamelist != None:
host = EmbeddedIPResolver.resolve(embeddedipqnamelist, qnamelist)
if host != None and len(host) > 0:
return host
# HACK: It is important to search the nametodns dictionary before iterating it so that
# global matching ['*.*.*.*.*.*.*.*.*.*'] will match last. Use sorting for that.
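# Example: qname "www.google.com" reverses to ['com', 'google', 'www']; a
# pattern '*.google.com' reverses to ['com', 'google', '*'] and matches,
# since a '*' component matches any label.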
for domain, host in sorted(nametodns.iteritems(), key=operator.itemgetter(1)):
# NOTE: It is assumed that domain name was already lowercased
# when it was loaded through --file, --fakedomains or --truedomains
# don't want to waste time lowercasing domains on every request.
# Split and reverse domain into components for matching
domain = domain.split('.')
domain.reverse()
# Compare domains in reverse.
for a, b in map(None, qnamelist, domain):
if a != b and b != "*":
break
else:
# Could be a real IP or False if we are doing reverse matching with 'truedomains'
return host
else:
return False
# Obtain a response from a real DNS server.
def proxyrequest(self, request, host, port="53", protocol="udp"):
reply = None
try:
if self.server.ipv6:
if protocol == "udp":
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
elif protocol == "tcp":
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
if protocol == "udp":
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
elif protocol == "tcp":
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3.0)
# Send the proxy request to a randomly chosen DNS server
if protocol == "udp":
sock.sendto(request, (host, int(port)))
reply = sock.recv(1024)
sock.close()
elif protocol == "tcp":
sock.connect((host, int(port)))
# Add length for the TCP request
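# (DNS over TCP frames each message with a 2-byte big-endian length prefix,
# RFC 1035 section 4.2.2; e.g. a 32-byte query is prefixed with "\x00\x20")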
length = binascii.unhexlify("%04x" % len(request))
sock.sendall(length + request)
# Strip length from the response
reply = sock.recv(1024)
reply = reply[2:]
sock.close()
except Exception, e:
print "[!] Could not proxy request: %s" % e
else:
return reply
# UDP DNS Handler for incoming requests
class UDPHandler(DNSHandler, SocketServer.BaseRequestHandler):
def handle(self):
(data, socket) = self.request
response = self.parse(data)
if response:
socket.sendto(response, self.client_address)
# TCP DNS Handler for incoming requests
class TCPHandler(DNSHandler, SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
# Remove the addition "length" parameter used in the
# TCP DNS protocol
data = data[2:]
response = self.parse(data)
if response:
# Calculate and add the additional "length" parameter
# used in TCP DNS protocol
length = binascii.unhexlify("%04x" % len(response))
self.request.sendall(length + response)
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
# Override SocketServer.UDPServer to add extra parameters
def __init__(self, server_address, RequestHandlerClass, nametodns, nameservers, ipv6, embeddedipqnamelist, log,
logsvc, noproxy):
self.nametodns = nametodns
self.nameservers = nameservers
self.ipv6 = ipv6
self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
self.embeddedipqnamelist = embeddedipqnamelist
self.log = log
self.logsvc = logsvc
self.noproxy = noproxy
SocketServer.UDPServer.__init__(self, server_address, RequestHandlerClass)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# Override default value
allow_reuse_address = True
# Override SocketServer.TCPServer to add extra parameters
def __init__(self, server_address, RequestHandlerClass, nametodns, nameservers, ipv6, embeddedipqnamelist, log,
logsvc, noproxy):
self.nametodns = nametodns
self.nameservers = nameservers
self.ipv6 = ipv6
self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
self.embeddedipqnamelist = embeddedipqnamelist
self.log = log
self.logsvc = logsvc
self.noproxy = noproxy
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
# Initialize and start the DNS Server
def start_cooking(interface, nametodns, nameservers, tcp=False, ipv6=False, port="53", embeddedipdomain=None,
logfile=None, loghttp=None, noproxy=False):
try:
if logfile:
log = open(logfile, 'a', 0)
log.write("[%s] DNSChef is active.\n" % (time.strftime("%d/%b/%Y:%H:%M:%S %z")))
else:
log = None
if loghttp:
logsvc = LogService(loghttp)
print "[*] loghttp is: " + loghttp
else:
logsvc = None
embeddedipqnamelist = None
if embeddedipdomain:
embeddedipqnamelist = embeddedipdomain.split('.')
embeddedipqnamelist.reverse()
if tcp:
print "[*] DNSChef is running in TCP mode"
server = ThreadedTCPServer((interface, int(port)), TCPHandler, nametodns, nameservers, ipv6,
embeddedipqnamelist, log, logsvc, noproxy)
else:
server = ThreadedUDPServer((interface, int(port)), UDPHandler, nametodns, nameservers, ipv6,
embeddedipqnamelist, log, logsvc, noproxy)
# Start a thread with the server -- that thread will then start
# more threads for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
# Loop in the main thread
while True: time.sleep(100)
except (KeyboardInterrupt, SystemExit):
if log:
log.write("[%s] DNSChef is shutting down.\n" % (time.strftime("%d/%b/%Y:%H:%M:%S %z")))
log.close()
server.shutdown()
print "[*] DNSChef is shutting down."
sys.exit()
except IOError:
print "[!] Failed to open log file for writing."
except Exception, e:
traceback.print_exc()
print "[!] Failed to start the server: %s" % e
if __name__ == "__main__":
header = " _ _ __ \n"
header += " | | version %s | | / _| \n" % DNSCHEF_VERSION
header += " __| |_ __ ___ ___| |__ ___| |_ \n"
header += " / _` | '_ \/ __|/ __| '_ \ / _ \ _|\n"
header += " | (_| | | | \__ \ (__| | | | __/ | \n"
header += " \__,_|_| |_|___/\___|_| |_|\___|_| \n"
header += " iphelix@thesprawl.org \n"
# Parse command line arguments
parser = OptionParser(usage="dnschef.py [options]:\n" + header,
description="DNSChef is a highly configurable DNS Proxy for Penetration Testers and Malware Analysts. It is capable of fine configuration of which DNS replies to modify or to simply proxy with real responses. In order to take advantage of the tool you must either manually configure or poison DNS server entry to point to DNSChef. The tool requires root privileges to run on privileged ports.")
fakegroup = OptionGroup(parser, "Fake DNS records:")
fakegroup.add_option('--fakeip', metavar="192.0.2.1", action="store",
help='IP address to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'A\' queries will be spoofed. Consider using --file argument if you need to define more than one IP address.')
fakegroup.add_option('--fakeipv6', metavar="2001:db8::1", action="store",
help='IPv6 address to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'AAAA\' queries will be spoofed. Consider using --file argument if you need to define more than one IPv6 address.')
fakegroup.add_option('--fakemail', metavar="mail.fake.com", action="store",
help='MX name to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'MX\' queries will be spoofed. Consider using --file argument if you need to define more than one MX record.')
fakegroup.add_option('--fakealias', metavar="www.fake.com", action="store",
help='CNAME name to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'CNAME\' queries will be spoofed. Consider using --file argument if you need to define more than one CNAME record.')
fakegroup.add_option('--fakens', metavar="ns.fake.com", action="store",
help='NS name to use for matching DNS queries. If you use this parameter without specifying domain names, then all \'NS\' queries will be spoofed. Consider using --file argument if you need to define more than one NS record.')
fakegroup.add_option('--file', action="store",
help="Specify a file containing a list of DOMAIN=IP pairs (one pair per line) used for DNS responses. For example: google.com=1.1.1.1 will force all queries to 'google.com' to be resolved to '1.1.1.1'. IPv6 addresses will be automatically detected. You can be even more specific by combining --file with other arguments. However, data obtained from the file will take precedence over others.")
parser.add_option_group(fakegroup)
parser.add_option('--fakedomains', metavar="thesprawl.org,google.com", action="store",
help='A comma separated list of domain names which will be resolved to FAKE values specified in the the above parameters. All other domain names will be resolved to their true values.')
parser.add_option('--truedomains', metavar="thesprawl.org,google.com", action="store",
help='A comma separated list of domain names which will be resolved to their TRUE values. All other domain names will be resolved to fake values specified in the above parameters.')
rungroup = OptionGroup(parser, "Optional runtime parameters.")
rungroup.add_option("--embeddedipdomain", action="store",
help="***Specify a lowercase domain name to enable embedded IP resolving, i.e. 127.0.0.1.<embeddedipdomain> to 127.0.0.1")
rungroup.add_option("--logfile", action="store", help="Specify a log file to record all activity")
rungroup.add_option("--loghttp", action="store", help="*** Specify an http server for request logging.")
rungroup.add_option("--noproxy", action="store_true", default=False, help="*** Disable proxying unknown names.")
rungroup.add_option("--hosts", action="store", help="*** Specify a hosts file for additional fake resolution.")
rungroup.add_option("--nameservers", metavar="8.8.8.8#53 or 4.2.2.1#53#tcp or 2001:4860:4860::8888",
default='8.8.8.8', action="store",
help='A comma separated list of alternative DNS servers to use with proxied requests. Nameservers can have either IP or IP#PORT format. A randomly selected server from the list will be used for proxy requests when provided with multiple servers. By default, the tool uses Google\'s public DNS server 8.8.8.8 when running in IPv4 mode and 2001:4860:4860::8888 when running in IPv6 mode.')
rungroup.add_option("-i", "--interface", metavar="127.0.0.1 or ::1", default="127.0.0.1", action="store",
help='Define an interface to use for the DNS listener. By default, the tool uses 127.0.0.1 for IPv4 mode and ::1 for IPv6 mode.')
rungroup.add_option("-t", "--tcp", action="store_true", default=False,
help="Use TCP DNS proxy instead of the default UDP.")
rungroup.add_option("-6", "--ipv6", action="store_true", default=False, help="Run in IPv6 mode.")
rungroup.add_option("-p", "--port", action="store", metavar="53", default="53",
help='Port number to listen for DNS requests.')
rungroup.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="Don't show headers.")
parser.add_option_group(rungroup)
(options, args) = parser.parse_args()
# Print program header
if options.verbose:
print header
# Main storage of domain filters
# NOTE: RDMAP is a dictionary map of qtype strings to handling classes
nametodns = dict()
for qtype in RDMAP.keys():
nametodns[qtype] = dict()
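# e.g. nametodns starts as {"A": {}, "AAAA": {}, "MX": {}, ...} and is later
# filled with entries like nametodns["A"]["thesprawl.org"] = "192.0.2.1"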
# Incorrect or incomplete command line arguments
if options.fakedomains and options.truedomains:
print "[!] You can not specify both 'fakedomains' and 'truedomains' parameters."
sys.exit(0)
elif not (options.fakeip or options.fakeipv6) and (options.fakedomains or options.truedomains):
print "[!] You have forgotten to specify which IP to use for fake responses"
sys.exit(0)
# Notify user about alternative listening port
if options.port != "53":
print "[*] Listening on an alternative port %s" % options.port
# Adjust defaults for IPv6
if options.ipv6:
print "[*] Using IPv6 mode."
if options.interface == "127.0.0.1":
options.interface = "::1"
if options.nameservers == "8.8.8.8":
options.nameservers = "2001:4860:4860::8888"
print "[*] DNSChef started on interface: %s " % options.interface
# Use alternative DNS servers
if options.nameservers:
nameservers = options.nameservers.split(',')
print "[*] Using the following nameservers: %s" % ", ".join(nameservers)
# External file definitions
if options.file:
config = ConfigParser()
config.read(options.file)
for section in config.sections():
if section in nametodns:
for domain, record in config.items(section):
# Make domain case insensitive
domain = domain.lower()
nametodns[section][domain] = record
print "[+] Cooking %s replies for domain %s with '%s'" % (section, domain, record)
else:
print "[!] DNS Record '%s' is not supported. Ignoring section contents." % section
# DNS Record and Domain Name definitions
# NOTE: '*.*.*.*.*.*.*.*.*.*' domain is used to match all possible queries.
if options.fakeip or options.fakeipv6 or options.fakemail or options.fakealias or options.fakens:
fakeip = options.fakeip
fakeipv6 = options.fakeipv6
fakemail = options.fakemail
fakealias = options.fakealias
fakens = options.fakens
if options.fakedomains:
for domain in options.fakedomains.split(','):
# Make domain case insensitive
domain = domain.lower()
domain = domain.strip()
if fakeip:
nametodns["A"][domain] = fakeip
print "[*] Cooking A replies to point to %s matching: %s" % (options.fakeip, domain)
if fakeipv6:
nametodns["AAAA"][domain] = fakeipv6
print "[*] Cooking AAAA replies to point to %s matching: %s" % (options.fakeipv6, domain)
if fakemail:
nametodns["MX"][domain] = fakemail
print "[*] Cooking MX replies to point to %s matching: %s" % (options.fakemail, domain)
if fakealias:
nametodns["CNAME"][domain] = fakealias
print "[*] Cooking CNAME replies to point to %s matching: %s" % (options.fakealias, domain)
if fakens:
nametodns["NS"][domain] = fakens
print "[*] Cooking NS replies to point to %s matching: %s" % (options.fakens, domain)
elif options.truedomains:
for domain in options.truedomains.split(','):
# Make domain case insensitive
domain = domain.lower()
domain = domain.strip()
if fakeip:
nametodns["A"][domain] = False
print "[*] Cooking A replies to point to %s not matching: %s" % (options.fakeip, domain)
nametodns["A"]['*.*.*.*.*.*.*.*.*.*'] = fakeip
if fakeipv6:
nametodns["AAAA"][domain] = False
print "[*] Cooking AAAA replies to point to %s not matching: %s" % (options.fakeipv6, domain)
nametodns["AAAA"]['*.*.*.*.*.*.*.*.*.*'] = fakeipv6
if fakemail:
nametodns["MX"][domain] = False
print "[*] Cooking MX replies to point to %s not matching: %s" % (options.fakemail, domain)
nametodns["MX"]['*.*.*.*.*.*.*.*.*.*'] = fakemail
if fakealias:
nametodns["CNAME"][domain] = False
print "[*] Cooking CNAME replies to point to %s not matching: %s" % (options.fakealias, domain)
nametodns["CNAME"]['*.*.*.*.*.*.*.*.*.*'] = fakealias
if fakens:
nametodns["NS"][domain] = False
print "[*] Cooking NS replies to point to %s not matching: %s" % (options.fakens, domain)
nametodns["NS"]['*.*.*.*.*.*.*.*.*.*'] = fakealias
else:
# NOTE: '*.*.*.*.*.*.*.*.*.*' domain is a special ANY domain
# which is compatible with the wildflag algorithm above.
if fakeip:
nametodns["A"]['*.*.*.*.*.*.*.*.*.*'] = fakeip
print "[*] Cooking all A replies to point to %s" % fakeip
if fakeipv6:
nametodns["AAAA"]['*.*.*.*.*.*.*.*.*.*'] = fakeipv6
print "[*] Cooking all AAAA replies to point to %s" % fakeipv6
if fakemail:
nametodns["MX"]['*.*.*.*.*.*.*.*.*.*'] = fakemail
print "[*] Cooking all MX replies to point to %s" % fakemail
if fakealias:
nametodns["CNAME"]['*.*.*.*.*.*.*.*.*.*'] = fakealias
print "[*] Cooking all CNAME replies to point to %s" % fakealias
if fakens:
nametodns["NS"]['*.*.*.*.*.*.*.*.*.*'] = fakens
print "[*] Cooking all NS replies to point to %s" % fakens
# Handle hosts.
if options.hosts:
hosts = HostsReader.read_all(options.hosts)
for name, ipv4 in hosts.iteritems():
print "[*] Cooking A replies to point to %s matching: %s" % (ipv4, name)
nametodns["A"][name] = ipv4
if len(hosts) == 0:
print "[*] hosts option specified but file cannot be read or is empty."
# Proxy all DNS requests
if not options.fakeip and not options.fakeipv6 and not options.fakemail and not options.fakealias and not options.fakens and not options.file and not options.hosts:
print "[*] No parameters were specified. Running in full proxy mode"
# Launch DNSChef
start_cooking(interface=options.interface, nametodns=nametodns, nameservers=nameservers, tcp=options.tcp,
ipv6=options.ipv6, port=options.port, embeddedipdomain=options.embeddedipdomain,
logfile=options.logfile, loghttp=options.loghttp, noproxy=options.noproxy)
|
train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import glob
import numpy as np
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
def main(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, ), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
for p in procs:
p.join()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in distributed initialization")
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def _get_parser():
parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
return parser
if __name__ == "__main__":
parser = _get_parser()
opt = parser.parse_args()
if opt.config is None or opt.run_name is None:
raise ValueError('base config and run_name must be set during training')
config_name = opt.config.split('/')[-1]
config_name = ''.join(config_name.split('.')[:-1])
dataset_name = opt.data.split('/')[-2]+'_'+opt.data.split('/')[-1]
output_dir = 'output/'+dataset_name+'/'+config_name+'_'+opt.run_name+'/'
os.makedirs(output_dir, exist_ok=True)
# setattr(opt, 'save_model', output_dir+'checkpoints/model')
setattr(opt, 'save_config', output_dir+'config.yml')
setattr(opt, 'tensorboard_log_dir', 'output/'+dataset_name+'/tblogs/'+config_name+'_'+opt.run_name)
parser.write_config_file(opt, [output_dir+'config.yml'])
if opt.autorestart:
filenames = []
step_nums = []
for filename in glob.glob(output_dir+'checkpoints/*.pt'):
filenames.append(filename)
step_num = os.path.basename(filename).split('_')[-1][:-3]
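# e.g. with OpenNMT's default "<model>_step_5000.pt" naming this yields "5000"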
step_nums.append(int(step_num))
if len(filenames) > 0:
indices = np.argsort(step_nums)
filenames = np.array(filenames)[indices]
opt.train_from = filenames[-1]
opt.gpt2_init_embanddec = False
opt.encoder_from = None
opt.gpt2_params_path = None
main(opt)
|
master.py
|
from builtins import str
from builtins import range
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import multiprocessing
import os, sys, time
from config import config, log_config
import util
AGENT_COUNT = config["agent_config"]["count"]
EVALUATOR_COUNT = config["evaluator_config"]["count"]
MODEL_AUGMENTED = config["model_config"] is not False
if config["resume"]:
ROOT_PATH = "output/" + config["env"]["name"] + "/" + config["name"]
else:
ROOT_PATH = util.create_and_wipe_directory("output/" + config["env"]["name"] + "/" + config["name"])
log_config()
import learner, agent, valuerl_learner
if MODEL_AUGMENTED: import worldmodel_learner
if __name__ == '__main__':
all_procs = set([])
interaction_procs = set([])
# lock
policy_lock = multiprocessing.Lock()
model_lock = multiprocessing.Lock() if MODEL_AUGMENTED else None
# queue
policy_replay_frame_queue = multiprocessing.Queue(1)
model_replay_frame_queue = multiprocessing.Queue(1) if MODEL_AUGMENTED else None
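# maxsize=1 applies backpressure: an agent's put() blocks until the learner
# has drained the previous frame batch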
# interactors
for interact_proc_i in range(AGENT_COUNT):
interact_proc = multiprocessing.Process(target=agent.main, args=(interact_proc_i, False, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config))
all_procs.add(interact_proc)
interaction_procs.add(interact_proc)
# evaluators
for interact_proc_i in range(EVALUATOR_COUNT):
interact_proc = multiprocessing.Process(target=agent.main, args=(interact_proc_i, True, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config))
all_procs.add(interact_proc)
interaction_procs.add(interact_proc)
# policy training
train_policy_proc = multiprocessing.Process(target=learner.run_learner, args=(valuerl_learner.ValueRLLearner, policy_replay_frame_queue, policy_lock, config, config["env"], config["policy_config"]), kwargs={"model_lock": model_lock})
all_procs.add(train_policy_proc)
# model training
if MODEL_AUGMENTED:
train_model_proc = multiprocessing.Process(target=learner.run_learner, args=(worldmodel_learner.WorldmodelLearner, model_replay_frame_queue, model_lock, config, config["env"], config["model_config"]))
all_procs.add(train_model_proc)
# start all processes
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
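# children inherit os.environ at start() time, so toggling CUDA_VISIBLE_DEVICES
# between start() calls pins interactors to CPU and each learner to its own GPU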
for i, proc in enumerate(interaction_procs):
os.environ['CUDA_VISIBLE_DEVICES'] = ''
proc.start()
os.environ['CUDA_VISIBLE_DEVICES'] = str(int(sys.argv[2]))
train_policy_proc.start()
if MODEL_AUGMENTED:
os.environ['CUDA_VISIBLE_DEVICES'] = str(1+int(sys.argv[2]))
train_model_proc.start()
# block the main process until interrupted, then wait for all children to exit
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
for proc in all_procs: proc.join()
|
event2.py
|
import random
import threading
import time
fila = []
resultado = []
evento = threading.Event()
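# Gate between the producers and the consumer: producers call evento.wait()
# before appending, so they only run while the event is set; both sides set
# it while fewer than two tasks are queued in fila and clear it otherwise.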
def Consumidor():
global fila, resultado
while True:
try:
if len(fila)<2:
evento.set()
else:
evento.clear()
x = fila.pop(0)
print('\nCONSUMER: processing task', x)
time.sleep(2)
resultado.append(x)
except:
time.sleep(2)
if len(fila) == 0:
break
def Produtor():
global fila, resultado
for i in range(10):
if len(fila)<2:
evento.set()
else:
evento.clear()
evento.wait()
fila.append(i)
tempo=random.random()
time.sleep(tempo)
print("\nPRODUTOR:",tempo)
print('PRODUTOR: tarefas pendentes:',len(fila),fila)
while True:
print('PRODUCER: finished tasks:', len(resultado), resultado)
if len(fila) == 0:
break
time.sleep(1)
def Produtor2():
global fila, resultado
for i in range(11,20):
if len(fila)<2:
evento.set()
else:
evento.clear()
evento.wait()
fila.append(i)
tempo=random.random()
time.sleep(tempo)
print("\nPRODUTOR2:",tempo)
print('PRODUTOR2: tarefas pendentes:',len(fila),fila)
while True:
print('PRODUCER2: finished tasks:', len(resultado), resultado)
if len(fila) == 0:
break
time.sleep(1)
if __name__=="__main__":
start = time.time()
t1 = threading.Thread(target=Consumidor)
t2 = threading.Thread(target=Produtor)
t3 = threading.Thread(target=Produtor2)
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
end = time.time()
print('Time taken in seconds -', end - start)
|
threading_server.py
|
import threading
import socket
import json
from .Shared import tcp_recv, tcp_send, recv_task, send_task
from itertools import chain
from queue import Queue, Empty
try:
import SharedData
except ImportError:
from os import getcwd
from sys import path
path.append(getcwd() + "/..")
import SharedData
# TODO: change to logging instead of print
# find port with this Power-shell script
# Get-Process -Id (Get-NetTCPConnection -LocalPort 80).OwningProcess
# Main connection start
def get_main_connection(config):
ip = SharedData.get_external_ip()
s_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
port = int(input("Enter primary server port >> "))
# port = 80
if port > 65535:
raise ValueError
except ValueError:
print("Wrong port value!")
continue
print(f"[S][INFO] Connect client to: {ip}:{port}")
try:
s_sock.bind(("", port))
except OSError:
print(SharedData.red(f"[S][CRIT] Cannot open server at port."))
raise
else:
conn, addr = s_sock.accept() # block until client signal
return conn, addr
def generate_queue(max_port: int):
print(f"Generating Queue from 1~{max_port}.")
q = Queue()
for i in range(1, max_port + 1):
q.put(i)
return q
def run_workers(
config,
works: Queue,
send: Queue,
recv: Queue,
in_use: Queue,
blocked: Queue,
e: threading.Event,
):
delimiter = config.READ_UNTIL.encode(config.ENCODING)
excl = set(config.EXCLUDE)
workers = [
threading.Thread(
target=worker,
args=[
i,
works,
send,
recv,
excl,
in_use,
blocked,
e,
delimiter,
config.TIMEOUT,
],
)
for i in range(config.WORKERS)
]
for w in workers:
w.start()
for w in workers:
w.join()
print(SharedData.bold("[S][info] All workers stopped."))
def worker(
id_,
task_q,
send,
recv,
exclude: set,
used: Queue,
blocked: Queue,
event: threading.Event,
delimiter: bytes,
timeout=None,
):
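# Per-iteration protocol: wait for a client worker announcement on recv, pop
# the next port from task_q, echo it to the client via send, then probe the
# port locally (timeout -> blocked, OSError -> in use, otherwise open).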
try:
while not task_q.empty() and not event.is_set():
# receive worker announcement.
try:
worker_id = recv.get(timeout=timeout)
recv.task_done()
except Empty:
print(SharedData.red(f"[SS{id_:2}][Warn] Timeout."))
continue
print(f"[SS{id_:2}][INFO] Worker {worker_id} available.")
# get next work.
print(f"[SS{id_:2}][INFO] Getting new port.")
# if getting a port times out, either the task queue is empty or another thread is just delayed.
try:
p: int = task_q.get(timeout=timeout)
task_q.task_done()
except Empty:
if task_q.empty():
break
else:
recv.put(worker_id) # queue.Queue is synchronous; put the announcement back and run again
continue
# check if port is in blacklist.
if p in exclude:
print(SharedData.cyan(f"[SS{id_:2}][INFO] Skipping Port {p}."))
continue
print(f"[SS{id_:2}][INFO] Sending port {p} to client.")
send.put(p)
print(f"[SS{id_:2}][INFO] Trying to serve port {p}.")
child_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
child_sock.settimeout(timeout)
try:
child_sock.bind(("", p))
child_sock.listen(1)
child_sock.accept()
except socket.timeout:
print(SharedData.red(f"[SS{id_:2}][Warn] Port {p} timeout."))
blocked.put(p)
except OSError:
print(SharedData.red(f"[SS{id_:2}][Warn] Port {p} in use."))
used.put(p)
else:
print(SharedData.green(f"[SS{id_:2}][Info] Port {p} is open."))
finally:
child_sock.close()
# Send end signal to client.
# first worker catching this signal will go offline.
print(SharedData.cyan(f"[SS{id_:2}][INFO] Done. Sending stop signal."))
send.put("DONE") # causing int type-error on client side workers.
except Exception:
# trigger event to stop all threads.
print(SharedData.red(f"[SS{id_:2}][CRIT] Exception Event set!."))
event.set()
raise
if event.is_set():
print(SharedData.bold(f"[SS{id_:2}][WARN] Task Finished by event."))
else:
print(SharedData.bold(f"[SS{id_:2}][INFO] Task Finished."))
def main():
config = SharedData.load_json_config()
conn, addr = get_main_connection(config)
# Send config to client.
tcp_send(SharedData.load_config_raw(), conn)
work = generate_queue(config.PORT_MAX)
delimiter = config.READ_UNTIL.encode(config.ENCODING)
start_event = threading.Event()
stop_event = threading.Event()
send_q = Queue()
recv_q = Queue()
in_use = Queue()
failed = Queue()
server_thread = [
threading.Thread(target=send_task, args=[send_q, stop_event, delimiter,]),
threading.Thread(target=recv_task, args=[recv_q, stop_event]),
]
for t in server_thread:
t.start()
run_workers(config, work, send_q, recv_q, in_use, failed, stop_event)
stop_event.set()
print(SharedData.bold("[S][Info] All workers stopped."))
# send stop signal to client side RECV
print(SharedData.bold("[S][info] Sending kill signal to client RECV."))
conn.send(config.END_MARK)
for t in server_thread:
t.join()
print("[S][info] RECV/SEND stopped.")
# sending pickled port results
print(f"[S][Info] Sending port data.")
used_data = json.dumps([USED_PORTS, SHUT_PORTS])
if conn.send(used_data.encode(config.ENCODING) + config.END_MARK):
print(f"[S][CRIT] Socket connection broken.")
conn.close()
print("\n[Results]")
print(f"Used Ports : {USED_PORTS}")
print(f"Closed Ports: {SHUT_PORTS}")
print(f"Excluded : {config.EXCLUDE}")
print(f"\nAll other ports from 1~{config.PORT_MAX} is open.")
if __name__ == "__main__":
main()
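# Illustrative only: a client receiving the final payload above could split on
# config.END_MARK and decode the JSON pair back into the two port lists, e.g.:
#   used_ports, shut_ports = json.loads(raw.split(end_mark)[0].decode(encoding))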
|
mininet_tests.py
|
#!/usr/bin/env python3
"""Mininet tests for FAUCET."""
# pylint: disable=too-many-lines
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
# pylint: disable=unbalanced-tuple-unpacking
from functools import partial
import binascii
import collections
import copy
import itertools
import ipaddress
import json
import os
import random
import re
import shutil
import socket
import threading
import time
import unittest
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import scapy.all
import yaml # pytype: disable=pyi-error
from mininet.log import error
from mininet.util import pmonitor
from clib import mininet_test_base
from clib import mininet_test_util
from clib import mininet_test_topo
from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH
MIN_MBPS = 100
CONFIG_BOILER_UNTAGGED = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
CONFIG_TAGGED_BOILER = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
class QuietHTTPServer(HTTPServer):
allow_reuse_address = True
timeout = None
@staticmethod
def handle_error(_request, _client_address):
return
class PostHandler(SimpleHTTPRequestHandler):
@staticmethod
def log_message(_format, *_args):
return
def _log_post(self):
content_len = int(self.headers.get('content-length', 0))
content = self.rfile.read(content_len).decode().strip()
if content and hasattr(self.server, 'influx_log'):
with open(self.server.influx_log, 'a') as influx_log:
influx_log.write(content + '\n')
class InfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
return self.send_response(204)
class SlowInfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
time.sleep(self.server.timeout * 3)
return self.send_response(500)
class FaucetTest(mininet_test_base.FaucetTestBase):
pass
class FaucetUntaggedTest(FaucetTest):
"""Basic untagged VLAN test."""
HOST_NAMESPACE = {}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
# pylint: disable=invalid-name
CONFIG = CONFIG_BOILER_UNTAGGED
EVENT_LOGGER_TIMEOUT = 120 # Timeout for event logger process
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
def verify_events_log(self, event_log, timeout=10):
required_events = set(['CONFIG_CHANGE', 'PORT_CHANGE', 'L2_LEARN', 'PORTS_STATUS'])
for _ in range(timeout):
prom_event_id = self.scrape_prometheus_var('faucet_event_id', dpid=False)
event_id = None
with open(event_log, 'r') as event_log_file:
for event_log_line in event_log_file.readlines():
event = json.loads(event_log_line.strip())
event_id = event['event_id']
for required_event in list(required_events):
                        if required_event in event:
required_events.remove(required_event)
break
if prom_event_id == event_id:
return
time.sleep(1)
self.assertEqual(prom_event_id, event_id)
self.assertFalse(required_events)
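    # Each event-log line is a JSON object keyed by event type, roughly of the shape
    # {"version": 1, "event_id": 42, "L2_LEARN": {...}} (illustrative; only
    # 'event_id' and the event-type keys are relied on above).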
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
event_log = os.path.join(self.tmpdir, 'event.log')
controller = self._get_controller()
sock = self.env['faucet']['FAUCET_EVENT_SOCK']
# Relying on a timeout seems a bit brittle;
# as an alternative we might possibly use something like
# `with popen(cmd...) as proc` to clean up on exceptions
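        # A rough sketch of that alternative (hypothetical, untested here):
        #   with controller.popen(['nc', '-U', sock], stdout=log_file) as proc:
        #       ...  # proc is cleaned up when the block exits, even on exceptions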
controller.cmd(mininet_test_util.timeout_cmd(
'nc -U %s > %s &' % (sock, event_log), self.EVENT_LOGGER_TIMEOUT))
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.verify_traveling_dhcp_mac()
self.gauge_smoke_test()
self.prometheus_smoke_test()
self.assertGreater(os.path.getsize(event_log), 0)
controller.cmd(
mininet_test_util.timeout_cmd(
'nc -U %s' % sock, 10))
self.verify_events_log(event_log)
class Faucet8021XBaseTest(FaucetTest):
HOST_NAMESPACE = {3: False}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
RADIUS_PORT = None
DOT1X_EXPECTED_EVENTS = []
SESSION_TIMEOUT = 3600
LOG_LEVEL = 'DEBUG'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="microphone"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="admin"
password="megaphone"
}
"""
freeradius_user_conf = """user Cleartext-Password := "microphone"
Session-timeout = {0}
admin Cleartext-Password := "megaphone"
Session-timeout = {0}
vlanuser1001 Cleartext-Password := "password"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan1"
vlanuser2222 Cleartext-Password := "milliphone"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan2"
filter_id_user_accept Cleartext-Password := "accept_pass"
Filter-Id = "accept_acl"
filter_id_user_deny Cleartext-Password := "deny_pass"
Filter-Id = "deny_acl"
"""
eapol1_host = None
eapol2_host = None
ping_host = None
nfv_host = None
nfv_intf = None
nfv_portno = None
event_log = ''
@staticmethod
def _priv_mac(host_id):
two_byte_port_num = '%04x' % host_id
two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:]))
return '00:00:00:00:%s' % two_byte_port_num_formatted
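    # For example, host_id 1 yields '00:00:00:00:00:01' and host_id 16 yields
    # '00:00:00:00:00:10' (the low two bytes carry the hex host/port number).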
def _init_faucet_config(self):
self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered()
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(self.nfv_host)[0]
nfv_intf = [
intf for intf in last_host_switch_link if intf in switch.intfList()][0]
self.nfv_intf = str(nfv_intf)
nfv_intf = self.nfv_host.intf()
self.RADIUS_PORT = mininet_test_util.find_free_udp_port(self.ports_sock, self._test_name())
self.CONFIG = self.CONFIG.replace('NFV_INTF', str(nfv_intf))
self.CONFIG = self.CONFIG.replace('RADIUS_PORT', str(self.RADIUS_PORT))
super(Faucet8021XBaseTest, self)._init_faucet_config()
def setUp(self):
super(Faucet8021XBaseTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
self.nfv_portno = self.port_map['port_4']
self.host_drop_all_ips(self.nfv_host)
self.nfv_pids = []
tcpdump_args = '-e -n -U'
self.eapol1_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -w %s/%s-start.pcap %s ether proto 0x888e &' % (
self.tmpdir, self.eapol1_host.name, tcpdump_args), 300))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i %s-eth0 -w %s/eap-lo.pcap %s ether proto 0x888e &' % (
self.nfv_host.name, self.tmpdir, tcpdump_args), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i lo -w %s/radius.pcap %s udp port %d &' % (
self.tmpdir, tcpdump_args, self.RADIUS_PORT), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.radius_log_path = self.start_freeradius()
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.event_log = os.path.join(self.tmpdir, 'event.log')
controller = self._get_controller()
sock = self.env['faucet']['FAUCET_EVENT_SOCK']
controller.cmd(
mininet_test_util.timeout_cmd(
'nc -U %s > %s &' % (sock, self.event_log), 300))
def tearDown(self):
for pid in self.nfv_pids:
self.nfv_host.cmd('kill %u' % pid)
super(Faucet8021XBaseTest, self).tearDown()
def post_test_checks(self):
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_dot1x_events_log()
def verify_dot1x_events_log(self):
def replace_mac(host_no):
replacement_macs = {
'HOST1_MAC': self.eapol1_host.MAC(),
'HOST2_MAC': self.eapol2_host.MAC(),
'HOST3_MAC': self.ping_host.MAC(),
'HOST4_MAC': self.nfv_host.MAC(),
}
return replacement_macs.get(host_no, None)
def insert_dynamic_values(dot1x_expected_events):
for dot1x_event in dot1x_expected_events:
top_level_key = list(dot1x_event.keys())[0]
                dynamic_values = [('dp_id', int(self.dpid))]
                for k, v in dot1x_event[top_level_key].items():
                    if k == 'port':
                        dynamic_values.append((k, self.port_map[v]))
                    if k == 'eth_src':
                        dynamic_values.append((k, replace_mac(v)))
                for k, v in dynamic_values:
                    dot1x_event[top_level_key][k] = v
if not self.DOT1X_EXPECTED_EVENTS:
return
dot1x_expected_events = copy.deepcopy(self.DOT1X_EXPECTED_EVENTS)
insert_dynamic_values(dot1x_expected_events)
with open(self.event_log, 'r') as event_file:
events_that_happened = []
for event_log_line in event_file.readlines():
if 'DOT1X' not in event_log_line:
continue
event = json.loads(event_log_line.strip())
events_that_happened.append(event['DOT1X'])
for expected_event in dot1x_expected_events:
self.assertTrue(expected_event in events_that_happened,
msg='expected event: {} not in events_that_happened {}'.format(
expected_event, events_that_happened))
def try_8021x(self, host, port_num, conf, and_logoff=False, terminate_wpasupplicant=False,
wpasup_timeout=180, tcpdump_timeout=15, tcpdump_packets=10,
expect_success=True):
if expect_success:
self.wait_8021x_flows(port_num)
port_labels = self.port_labels(port_num)
success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [
lambda: self.wpa_supplicant_callback(
host, port_num, conf, and_logoff,
timeout=wpasup_timeout,
terminate_wpasupplicant=terminate_wpasupplicant)],
timeout=tcpdump_timeout, vflags='-vvv', packets=tcpdump_packets)
if expect_success:
self.wait_for_eap_success(host, self.get_wpa_ctrl_path(host))
if not and_logoff:
self.wait_8021x_success_flows(host, port_num)
success = 'Success' in tcpdump_txt
new_success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
new_failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
new_logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
new_dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
new_dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
new_dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
if expect_success != success:
return False
if expect_success and success:
self.assertGreater(new_success_total, success_total)
self.assertGreater(new_dp_success_total, dp_success_total)
self.assertEqual(failure_total, new_failure_total)
self.assertEqual(dp_failure_total, new_dp_failure_total)
logoff = 'logoff' in tcpdump_txt
if logoff != and_logoff:
return False
if and_logoff:
self.assertGreater(new_logoff_total, logoff_total)
return True
self.assertEqual(logoff_total, new_logoff_total)
self.assertEqual(dp_logoff_total, new_dp_logoff_total)
self.assertEqual(dp_success_total, new_dp_success_total)
self.assertGreaterEqual(new_failure_total, failure_total)
self.assertGreaterEqual(new_dp_failure_total, dp_failure_total)
return False
def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_success=True):
for _ in range(retries):
if self.try_8021x(host, port_num, conf, and_logoff, expect_success=expect_success):
return True
time.sleep(1)
return False
def wait_8021x_flows(self, port_no):
port_actions = [
'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno]
from_nfv_actions = [
'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no]
from_nfv_match = {
'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e}
self.wait_until_matching_flow(None, table_id=0, actions=port_actions)
self.wait_until_matching_flow(from_nfv_match, table_id=0, actions=from_nfv_actions)
def wait_8021x_success_flows(self, host, port_no):
from_host_actions = [
'GOTO_TABLE:1']
from_host_match = {
'in_port': port_no, 'dl_src': host.MAC()}
self.wait_until_matching_flow(from_host_match, table_id=0, actions=from_host_actions)
def verify_host_success(self, eapol_host, port_no, wpasupplicant_conf, and_logoff):
self.one_ipv4_ping(
eapol_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
self.assertTrue(
self.try_8021x(
eapol_host, port_no, wpasupplicant_conf, and_logoff=and_logoff))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)
def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, terminate_wpasupplicant=False):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
if os.path.exists(wpa_ctrl_path):
self.terminate_wpasupplicant(host)
for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines():
try:
os.kill(int(pid), 15)
except (ValueError, ProcessLookupError):
pass
try:
shutil.rmtree(wpa_ctrl_path)
except FileNotFoundError:
pass
log_prefix = host.name + '_'
self.start_wpasupplicant(
host, conf, timeout=timeout,
wpa_ctrl_socket_path=wpa_ctrl_path, log_prefix=log_prefix)
if and_logoff:
self.wait_for_eap_success(host, wpa_ctrl_path)
self.wait_until_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(), require_host_learned=False)
host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path)
self.wait_until_no_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
if terminate_wpasupplicant:
self.terminate_wpasupplicant(host)
def terminate_wpasupplicant(self, host):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path)
def get_wpa_ctrl_path(self, host):
wpa_ctrl_path = os.path.join(
self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name))
return wpa_ctrl_path
@staticmethod
def get_wpa_status(host, wpa_ctrl_path):
status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path)
for line in status.splitlines():
if line.startswith('EAP state'):
return line.split('=')[1].strip()
return None
def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5):
for _ in range(timeout):
eap_state = self.get_wpa_status(host, wpa_ctrl_path)
if eap_state == 'SUCCESS':
return
time.sleep(1)
self.fail('did not get EAP success: %s' % eap_state)
def wait_for_radius(self, radius_log_path):
self.wait_until_matching_lines_from_file(
r'.*Ready to process requests', radius_log_path)
def start_freeradius(self):
radius_log_path = '%s/radius.log' % self.tmpdir
listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})'
listen_config = """listen {
type = auth
ipaddr = *
port = %s
}
listen {
type = acct
ipaddr = *
port = %d
}""" % (self.RADIUS_PORT, self.RADIUS_PORT + 1)
if os.path.isfile('/etc/freeradius/users'):
# Assume we are dealing with freeradius 2 configuration
shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/users' % self.tmpdir
with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(listen_match, '', default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.write(listen_config)
default_site.truncate()
else:
# Assume we are dealing with freeradius >=3 configuration
freerad_version = os.popen(
                r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0-9])"').read().rstrip()
freerad_major_version = freerad_version.split(' ')[1]
shutil.copytree('/etc/freeradius/%s/' % freerad_major_version,
'%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir
with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(
listen_match, '', default_config)
default_config = re.sub(
r'server default {', 'server default {\n'+listen_config, default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.truncate()
with open(users_path, 'w') as users_file:
users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT))
with open('%s/freeradius/clients.conf' % self.tmpdir, 'w') as clients:
clients.write("""client localhost {
ipaddr = 127.0.0.1
secret = SECRET
}""")
with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+') as innertunnel_site:
tunnel_config = innertunnel_site.read()
listen_config = """listen {
ipaddr = 127.0.0.1
port = %d
type = auth
}""" % (self.RADIUS_PORT + 2)
tunnel_config = re.sub(listen_match, listen_config, tunnel_config)
innertunnel_site.seek(0)
innertunnel_site.write(tunnel_config)
innertunnel_site.truncate()
os.system('chmod o+rx %s' % self.root_tmpdir)
os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir)
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'freeradius -X -l %s -d %s/freeradius &' % (radius_log_path, self.tmpdir),
300))
self.wait_for_radius(radius_log_path)
return radius_log_path
class Faucet8021XSuccessTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'logoff'}}]
SESSION_TIMEOUT = 3600
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.verify_host_success(
self.eapol2_host, self.port_map['port_2'], self.wpasupplicant_conf_1, True)
self.post_test_checks()
class Faucet8021XFailureTest(Faucet8021XBaseTest):
"""Failure due to incorrect identity/password"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="wrongpassword"
}
"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'failure'}}]
def test_untagged(self):
self.assertFalse(
self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.post_test_checks()
class Faucet8021XPortStatusTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}]
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no4 = self.port_map['port_4']
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no1)
# self.wait_until_no_matching_flow(None, table_id=0, actions=actions)
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no4)
# self.wait_until_no_matching_flow(match, table_id=0, actions=actions)
self.set_port_up(port_no4)
self.wait_8021x_flows(port_no1)
# check only have rules for port 2 installed, after the NFV port comes up
self.set_port_down(port_no1)
self.flap_port(port_no4)
self.wait_8021x_flows(port_no2)
# no portno1
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
# When the port goes down, and up the host should not be authenticated anymore.
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False)
# terminate so don't automatically reauthenticate when port goes back up.
self.terminate_wpasupplicant(self.eapol1_host)
self.flap_port(port_no1)
self.wait_8021x_flows(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XPortFlapTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
for _ in range(2):
self.set_port_up(port_no1)
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.set_port_down(port_no1)
self.assertFalse(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
wpa_status = self.get_wpa_status(
self.eapol1_host, self.get_wpa_ctrl_path(self.eapol1_host))
self.assertNotEqual('SUCCESS', wpa_status)
# Kill supplicant so cant reply to the port up identity request.
self.terminate_wpasupplicant(self.eapol1_host)
self.post_test_checks()
class Faucet8021XIdentityOnPortUpTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
# start wpa sup, logon, then send id request. should then be 2 success.
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False,
tcpdump_timeout=180, tcpdump_packets=6))
self.set_port_down(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
def port_up(port):
self.set_port_up(port)
self.wait_8021x_flows(port)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
self.eapol1_host, tcpdump_filter, [
lambda: port_up(port_no1)],
timeout=80, vflags='-vvv', packets=10)
for req_str in (
'len 5, Request (1)', # assume that this is the identity request
'Identity: user', # supplicant replies with username
'Success', # supplicant success
):
self.assertTrue(req_str in tcpdump_txt)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True, retries=10)
self.post_test_checks()
class Faucet8021XPeriodicReauthTest(Faucet8021XBaseTest):
SESSION_TIMEOUT = 15
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_labels1 = self.port_labels(port_no1)
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
start_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
for expected_offset in range(4):
expected_total = start_total + expected_offset
for _ in range(self.SESSION_TIMEOUT * 2):
total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
if total == expected_total:
break
time.sleep(1)
self.assertEqual(expected_total, total, msg='failed to successfully re-auth')
self.post_test_checks()
class Faucet8021XConfigReloadTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.wait_8021x_flows(port_no1)
self.wait_8021x_flows(port_no2)
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][port_no1]['dot1x'] = False
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.wait_8021x_flows(port_no2)
self.post_test_checks()
class Faucet8021XCustomACLLoginTest(Faucet8021XBaseTest):
"""Ensure that 8021X Port ACLs Work before and after Login"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
auth_acl:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
noauth_acl:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
auth_acl: auth_acl
noauth_acl: noauth_acl
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.post_test_checks()
class Faucet8021XCustomACLLogoutTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
def test_untagged(self):
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XMABTest(Faucet8021XSuccessTest):
"""Ensure that 802.1x Port Supports Mac Auth Bypass."""
DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC',
'status': 'success'}},
]
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_mab: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
def start_freeradius(self):
# Add the host mac address to the FreeRADIUS config
self.freeradius_user_conf += '\n{0} Cleartext-Password := "{0}"'.format(
str(self.eapol1_host.MAC()).replace(':', '')
)
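        # e.g. a host MAC of 00:11:22:33:44:55 (illustrative) yields the user line:
        #   001122334455 Cleartext-Password := "001122334455"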
return super(Faucet8021XMABTest, self).start_freeradius()
@staticmethod
def dhclient_callback(host, timeout):
dhclient_cmd = 'dhclient -d -1 %s' % host.defaultIntf()
return host.cmd(mininet_test_util.timeout_cmd(dhclient_cmd, timeout), verbose=True)
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.dhclient_callback(self.eapol1_host, 10)
self.wait_until_matching_lines_from_file(r'.*AAA_SUCCESS.*', self.env['faucet']['FAUCET_LOG'])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertEqual(
1,
self.scrape_prometheus_var('port_dot1x_success_total', labels=self.port_labels(port_no1), default=0))
self.post_test_checks()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
]
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_accept"
password="accept_pass"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_deny"
password="deny_pass"
}
"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
accept_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
deny_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_dyn_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_dyn_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XDynACLLogoutTest(Faucet8021XDynACLLoginTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'logoff'}}
]
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XVLANTest(Faucet8021XSuccessTest):
"""Test that two hosts are put into vlans.
Same VLAN, Logoff, diff VLANs, port flap."""
CONFIG_GLOBAL = """vlans:
100:
vid: 100
description: "untagged"
radiusassignedvlan1:
vid: %u
description: "untagged"
dot1x_assigned: True
radiusassignedvlan2:
vid: %u
description: "untagged"
dot1x_assigned: True
""" % (mininet_test_base.MAX_TEST_VID - 1,
mininet_test_base.MAX_TEST_VID)
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: radiusassignedvlan1
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
RADIUS_PORT = 1940
DOT1X_EXPECTED_EVENTS = []
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser1001"
password="password"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser2222"
password="milliphone"
}
"""
def test_untagged(self):
vid = 100 ^ mininet_test_base.OFPVID_PRESENT
radius_vid1 = (mininet_test_base.MAX_TEST_VID - 1) ^ mininet_test_base.OFPVID_PRESENT
radius_vid2 = mininet_test_base.MAX_TEST_VID ^ mininet_test_base.OFPVID_PRESENT
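        # OFPVID_PRESENT is 0x1000, so e.g. VID 100 appears as 0x1064 (4196) in flow matches.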
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no3 = self.port_map['port_3']
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
# check two 1x hosts play nicely. (same dyn vlan)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=True)
# check two 1x hosts dont play (diff dyn vlan).
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=False)
# move host1 to new VLAN
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=True)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_SRC_TABLE)
self.wait_until_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_DST_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE)
self.wait_until_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_DST_TABLE)
# test port up/down. removes the dynamic vlan & host cache.
self.flap_port(port_no2)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol2_host.MAC()},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol2_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid2])
self.wait_until_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.post_test_checks()
class FaucetUntaggedRandomVidTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
randvlan:
vid: 100
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: randvlan
%(port_2)d:
native_vlan: randvlan
%(port_3)d:
native_vlan: randvlan
%(port_4)d:
native_vlan: randvlan
"""
def test_untagged(self):
last_vid = None
for _ in range(5):
vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
if vid == last_vid:
continue
self.change_vlan_config(
'randvlan', 'vid', vid, cold_start=True, hup=True)
self.ping_all_when_learned()
last_vid = vid
class FaucetUntaggedNoCombinatorialFlood(FaucetUntaggedTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedControllerNfvTest(FaucetUntaggedTest):
# Name of switch interface connected to last host, accessible to controller.
last_host_switch_intf = None
def _init_faucet_config(self):
last_host = self.hosts_name_ordered()[-1]
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(last_host)[0]
self.last_host_switch_intf = [intf for intf in last_host_switch_link if intf in switch.intfList()][0]
# Now that interface is known, FAUCET config can be written to include it.
super(FaucetUntaggedControllerNfvTest, self)._init_faucet_config()
def test_untagged(self):
super(FaucetUntaggedControllerNfvTest, self).test_untagged()
# Confirm controller can see switch interface with traffic.
ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % self.last_host_switch_intf)
self.assertTrue(
re.search('(R|T)X packets[: ][1-9]', ifconfig_output),
msg=ifconfig_output)
class FaucetUntaggedBroadcastTest(FaucetUntaggedTest):
def test_untagged(self):
super(FaucetUntaggedBroadcastTest, self).test_untagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
self.verify_unicast_not_looped()
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
nsonly:
- rule:
dl_type: %u
ip_proto: 58
icmpv6_type: 135
actions:
allow: 1
- rule:
actions:
allow: 0
vlans:
100:
description: "untagged"
""" % IPV6_ETH
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: nsonly
%(port_2)d:
native_vlan: 100
acl_in: nsonly
%(port_3)d:
native_vlan: 100
acl_in: nsonly
%(port_4)d:
native_vlan: 100
acl_in: nsonly
"""
def test_untagged(self):
self.verify_no_bcast_to_self()
class FaucetUntaggedNoCombinatorialBroadcastTest(FaucetUntaggedBroadcastTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetExperimentalAPITest(FaucetUntaggedTest):
"""Test the experimental Faucet API."""
CONTROLLER_CLASS = mininet_test_topo.FaucetExperimentalAPI
results_file = None
def _set_static_vars(self):
super(FaucetExperimentalAPITest, self)._set_static_vars()
self._set_var_path('faucet', 'API_TEST_RESULT', 'result.txt')
self.results_file = self.env['faucet']['API_TEST_RESULT']
def test_untagged(self):
self.wait_until_matching_lines_from_file(r'.*pass.*', self.results_file)
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
def test_untagged(self):
faucet_log = self.env['faucet']['FAUCET_LOG']
self.assertTrue(os.path.exists(faucet_log))
os.rename(faucet_log, faucet_log + '.old')
self.assertTrue(os.path.exists(faucet_log + '.old'))
self.flap_all_switch_ports()
self.assertTrue(os.path.exists(faucet_log))
class FaucetUntaggedLLDPTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
@staticmethod
def wireshark_payload_format(payload_str):
formatted_payload_str = ''
groupsize = 4
for payload_offset in range(len(payload_str) // groupsize):
char_count = payload_offset * 2
if char_count % 0x10 == 0:
formatted_payload_str += '0x%4.4x: ' % char_count
payload_fragment = payload_str[payload_offset * groupsize:][:groupsize]
formatted_payload_str += ' ' + payload_fragment
return formatted_payload_str
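    # e.g. wireshark_payload_format('deadbeef') returns '0x0000:  dead beef', mirroring
    # tcpdump's hex-dump layout so the expected LLDP DP ID can be grepped for below.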
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3])
faucet_lldp_dp_id_attr = '%2.2x' % 1
expected_lldp_dp_id = ''.join((
oui_prefix,
faucet_lldp_dp_id_attr,
binascii.hexlify(str(self.dpid).encode('UTF-8')).decode()))
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 6: faucet',
r'Port Description TLV \(4\), length 10: first_port',
self.wireshark_payload_format(expected_lldp_dp_id)):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
"""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 8: faucet-1',
r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
REQUIRES_METERS = True
OVS_TYPE = 'user'
CONFIG_GLOBAL = """
meters:
lossymeter:
meter_id: 1
entry:
flags: "KBPS"
bands:
[
{
type: "DROP",
rate: 100
}
]
acls:
lossyacl:
- rule:
actions:
meter: lossymeter
allow: 1
vlans:
100:
description: "untagged"
"""
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
CONFIG = """
interfaces:
%(port_1)d:
acl_in: lossyacl
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
super(FaucetUntaggedApplyMeterTest, self).test_untagged()
first_host, second_host = self.hosts_name_ordered()[:2]
error('metered ping flood: %s' % first_host.cmd('ping -c 10000 -f %s' % second_host.IP()))
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
NETNS = True
CONFIG = """
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Create macvlan interfaces, with one in a separate namespace,
# to force traffic between them to be hairpinned via FAUCET.
first_host, second_host = self.hosts_name_ordered()[:2]
macvlan1_intf = 'macvlan1'
macvlan1_ipv4 = '10.0.0.100'
macvlan2_intf = 'macvlan2'
macvlan2_ipv4 = '10.0.0.101'
self.add_macvlan(first_host, macvlan1_intf, ipa=macvlan1_ipv4, mode='vepa')
self.add_macvlan(first_host, macvlan2_intf, mode='vepa')
macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
['ip link set %s netns %s' % (macvlan2_intf, netns)])
for exec_cmd in (
('ip address add %s/24 brd + dev %s' % (
macvlan2_ipv4, macvlan2_intf),
'ip link set %s up' % macvlan2_intf)):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
self.quiet_commands(first_host, setup_cmds)
self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_ipv4)
self.one_ipv4_ping(first_host, second_host.IP())
# Verify OUTPUT:IN_PORT flood rules are exercised.
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE, actions=['OUTPUT:IN_PORT'])
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': macvlan2_mac},
table_id=self._ETH_DST_HAIRPIN_TABLE, actions=['OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.one_ipv6_ping(first_host, second_host_ip.ip))
self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS):
for _ in range(retries):
port_desc = self.get_port_desc_from_dpid(self.dpid, dp_port)
port_name = port_desc['name']
port_state = port_desc['state']
port_config = port_desc['config']
port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6
error('DP %u is %s, at %u mbps\n' % (dp_port, port_name, port_speed_mbps))
if port_speed_mbps < min_mbps:
error('port speed %u below minimum %u mbps\n' % (
port_speed_mbps, min_mbps))
elif port_config != 0:
error('port config %u must be 0 (all clear)' % port_config)
elif port_state not in (0, 4):
error('state %u must be 0 (all flags clear or live)\n' % (
port_state))
else:
return
time.sleep(1)
self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc))
def test_portmap(self):
prom_desc = self.scrape_prometheus(
controller='faucet', var='of_dp_desc_stats')
self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats')
error('DP: %s\n' % prom_desc[0])
error('port_map: %s\n' % self.port_map)
for i, host in enumerate(self.hosts_name_ordered(), start=1):
in_port = 'port_%u' % i
dp_port = self.port_map[in_port]
if dp_port in self.switch_map:
error('verifying cabling for %s: host %s -> dp %u\n' % (
in_port, self.switch_map[dp_port], dp_port))
else:
error('verifying host %s -> dp %s\n' % (
in_port, dp_port))
self.verify_dp_port_healthy(dp_port)
self.require_host_learned(host, in_port=dp_port)
learned = self.prom_macs_learned()
self.assertEqual(
len(self.hosts_name_ordered()), len(learned),
msg='test requires exactly %u hosts learned (got %s)' % (
len(self.hosts_name_ordered()), learned))
def test_listening(self):
        msg_template = (
            'Processes listening on test interfaces, or on all interfaces, may interfere '
            'with tests. Please deconfigure them (e.g. configure interface as "unmanaged"):\n\n%s')
controller = self._get_controller()
ss_out = controller.cmd('ss -lnep').splitlines()
listening_all_re = re.compile(r'^.+\s+(\*:\d+|:::\d+)\s+(:+\*|\*:\*).+$')
listening_all = [line for line in ss_out if listening_all_re.match(line)]
for test_intf in list(self.switch_map.values()):
int_re = re.compile(r'^.+\b%s\b.+$' % test_intf)
listening_int = [line for line in ss_out if int_re.match(line)]
self.assertFalse(
len(listening_int),
msg=(msg_template % '\n'.join(listening_int)))
if listening_all:
print('Warning: %s' % (msg_template % '\n'.join(listening_all)))
def test_silence(self):
# Make all test hosts silent and ensure we hear no other packets.
for host in self.hosts_name_ordered():
self.host_drop_all_ips(host)
host.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % host.defaultIntf())
for host in self.hosts_name_ordered():
tcpdump_filter = ''
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [], timeout=10, vflags='-vv', packets=1)
self.assertTrue(
self.tcpdump_rx_packets(tcpdump_txt, 0),
msg='got unexpected packet from test switch: %s' % tcpdump_txt)
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
"""Testing Gauge Prometheus"""
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'prometheus'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'prometheus'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'prometheus'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def _start_gauge_check(self):
if not self.gauge_controller.listen_port(self.config_ports['gauge_prom_port']):
return 'gauge not listening on prometheus port'
return None
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertIsNotNone(self.scrape_prometheus_var(
'faucet_pbr_version', any_labels=True, controller='gauge', retries=3))
conf = self._get_faucet_conf()
cookie = conf['dps'][self.DP_NAME]['cookie']
if not self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS):
self.fail(msg='Gauge Prometheus port counters not increasing')
for _ in range(self.DB_TIMEOUT * 3):
updated_counters = True
for host in self.hosts_name_ordered():
host_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'cookie': cookie,
'eth_dst': host.MAC(),
'inst_count': str(1),
'table_id': str(self._ETH_DST_TABLE),
'vlan': str(100),
'vlan_vid': str(4196)
}
packet_count = self.scrape_prometheus_var(
'flow_packet_count_eth_dst', labels=host_labels, controller='gauge')
byte_count = self.scrape_prometheus_var(
'flow_byte_count_eth_dst', labels=host_labels, controller='gauge')
if packet_count is None or packet_count == 0:
updated_counters = False
if byte_count is None or byte_count == 0:
updated_counters = False
if updated_counters:
return
time.sleep(1)
self.fail(msg='Gauge Prometheus flow counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
"""Basic untagged VLAN test with Influx."""
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {'gauge_influx_port': None}
influx_log = None
server_thread = None
server = None
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 2
db: 'influx'
port_state:
dps: ['%s']
type: 'port_state'
interval: 2
db: 'influx'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 2
db: 'influx'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def setup_influx(self):
self.influx_log = os.path.join(self.tmpdir, 'influx.log')
if self.server:
self.server.influx_log = self.influx_log
self.server.timeout = self.DB_TIMEOUT
def setUp(self): # pylint: disable=invalid-name
self.handler = InfluxPostHandler
super(FaucetUntaggedInfluxTest, self).setUp()
self.setup_influx()
def tearDown(self): # pylint: disable=invalid-name
if self.server:
self.server.shutdown()
self.server.socket.close()
super(FaucetUntaggedInfluxTest, self).tearDown()
def _wait_error_shipping(self, timeout=None):
if timeout is None:
timeout = self.DB_TIMEOUT * 3 * 2
gauge_log_name = self.env['gauge']['GAUGE_LOG']
self.wait_until_matching_lines_from_file(
r'.+error shipping.+', gauge_log_name, timeout=timeout)
def _verify_influx_log(self, retries=3):
self.assertTrue(os.path.exists(self.influx_log))
expected_vars = {
'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
'errors_in', 'bytes_in', 'flow_byte_count', 'port_state_reason',
'packets_in', 'packets_out'}
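        # Each log line is InfluxDB line protocol with exactly three whitespace-separated
        # fields, e.g. (illustrative values):
        #   bytes_in,dp_id=0x1,dp_name=faucet-1,port=1 value=1234.0 1570000000000000000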
observed_vars = set()
for _ in range(retries):
with open(self.influx_log) as influx_log:
influx_log_lines = influx_log.readlines()
for point_line in influx_log_lines:
point_fields = point_line.strip().split()
self.assertEqual(3, len(point_fields), msg=point_fields)
ts_name, value_field, _ = point_fields
value = float(value_field.split('=')[1])
ts_name_fields = ts_name.split(',')
self.assertGreater(len(ts_name_fields), 1)
observed_vars.add(ts_name_fields[0])
label_values = {}
for label_value in ts_name_fields[1:]:
label, value = label_value.split('=')
label_values[label] = value
if ts_name.startswith('flow'):
self.assertTrue('inst_count' in label_values, msg=point_line)
                    if 'vlan_vid' in label_values:
                        self.assertEqual(
                            int(label_values['vlan']),
                            int(label_values['vlan_vid']) ^ 0x1000)
if expected_vars == observed_vars:
break
time.sleep(1)
self.assertEqual(expected_vars, observed_vars)
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
def _wait_influx_log(self):
for _ in range(self.DB_TIMEOUT * 3):
if os.path.exists(self.influx_log):
return
time.sleep(1)
def _start_gauge_check(self):
influx_port = self.config_ports['gauge_influx_port']
try:
self.server = QuietHTTPServer(
(mininet_test_util.LOCALHOST, influx_port),
self.handler) # pytype: disable=attribute-error
self.server.timeout = self.DB_TIMEOUT
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
return None
except socket.error as err:
return 'cannot start Influx test server: %s' % err
def test_untagged(self):
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedMultiDBWatcherTest(
FaucetUntaggedInfluxTest, FaucetUntaggedPrometheusGaugeTest):
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {
'gauge_prom_port': None,
'gauge_influx_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
dbs: ['prometheus', 'influx']
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
dbs: ['prometheus', 'influx']
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
dbs: ['prometheus', 'influx']
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
@staticmethod
def test_tagged():
return
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertTrue(self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS))
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
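    """Test Gauge reports errors when no InfluxDB server is listening."""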
def _start_gauge_check(self):
return None
def test_untagged(self):
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
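    """Test Gauge reports errors when the InfluxDB server address is unreachable."""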
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.2'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_timeout: 2
"""
def _start_gauge_check(self):
return None
def test_untagged(self):
self.gauge_controller.cmd(
'route add 127.0.0.2 gw 127.0.0.1 lo')
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetSingleUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
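    """Test Gauge reports errors when the InfluxDB server accepts but responds too slowly."""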
def setUp(self): # pylint: disable=invalid-name
self.handler = SlowInfluxPostHandler
super().setUp()
self.setup_influx()
def test_untagged(self):
self.ping_all_when_learned()
self._wait_influx_log()
self.assertTrue(os.path.exists(self.influx_log))
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetNailedForwardingTest(FaucetUntaggedTest):
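    """Test forwarding nailed up entirely via ACL output actions, with no host learning required."""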
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingTest(FaucetNailedForwardingTest):
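    """Test nailed-up forwarding with failover groups: output moves from port 2 to port 3 when port 2 goes down."""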
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_lldp_blocked()
        # Verify the reserved multicast (802.1D STP/LLDP/802.1x) flood block rule was hit.
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
table_id=self._FLOOD_TABLE)
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_cdp_blocked()
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
"""Test mixture of tagged and untagged hosts on the same VLAN."""
N_TAGGED = 1
N_UNTAGGED = 3
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "mixed"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedSameVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=1, n_untagged=3, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""Test connectivity including after port flapping."""
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.ping_all_when_learned()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedAndUntaggedSameVlanEgressTest(FaucetTaggedAndUntaggedSameVlanTest):
REQUIRES_METADATA = True
CONFIG = """
egress_pipeline: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetTaggedAndUntaggedSameVlanGroupTest(FaucetTaggedAndUntaggedSameVlanTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
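    """Test that VLAN max_hosts caps learned hosts and increments vlan_learn_bans."""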
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: 2
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.pingAll()
learned_hosts = [
host for host in self.hosts_name_ordered() if self.host_learned(host)]
self.assertEqual(2, len(learned_hosts))
self.assertEqual(2, self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
self.assertGreater(
self.scrape_prometheus_var(
'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
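    """Test that per-port max_hosts caps learned hosts and increments port_learn_bans."""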
MAX_HOSTS = 3
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
max_hosts: 3
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.ping_all_when_learned()
for i in range(10, 10+(self.MAX_HOSTS*2)):
mac_intf = 'mac%u' % i
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
second_host.cmd('ping -c1 -I%s %s > /dev/null &' % (mac_intf, first_host.IP()))
flows = self.get_matching_flows_on_dpid(
self.dpid,
{'dl_vlan': '100', 'in_port': int(self.port_map['port_2'])},
table_id=self._ETH_SRC_TABLE)
self.assertEqual(self.MAX_HOSTS, len(flows))
port_labels = self.port_labels(self.port_map['port_2'])
self.assertGreater(
self.scrape_prometheus_var(
'port_learn_bans', port_labels), 0)
learned_macs = [
mac for _, mac in self.scrape_prometheus_var(
'learned_macs', dict(port_labels, vlan=100),
multiple=True) if mac]
self.assertEqual(self.MAX_HOSTS, len(learned_macs))
class FaucetSingleHostsTimeoutPrometheusTest(FaucetUntaggedTest):
"""Test for hosts that have been learnt are exported via prometheus.
Hosts should timeout, and the exported prometheus values should
be overwritten.
If the maximum number of MACs at any one time is 5, then only 5 values
should be exported, even if over 2 hours, there are 100 MACs learnt
"""
TIMEOUT = 15
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 15
arp_neighbor_timeout: 4
nd_neighbor_timeout: 4
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
""" + CONFIG_BOILER_UNTAGGED
def hosts_learned(self, hosts):
"""Check that hosts are learned by FAUCET on the expected ports."""
macs_learned = []
for mac, port in hosts.items():
if self.prom_mac_learned(mac, port=port):
self.mac_learned(mac, in_port=port)
macs_learned.append(mac)
return macs_learned
def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts):
for _ in range(3):
fping_out = first_host.cmd(mininet_test_util.timeout_cmd(
'fping -i300 -c3 %s' % ' '.join(mac_ips), 5))
macs_learned = self.hosts_learned(hosts)
if len(macs_learned) == len(hosts):
return
time.sleep(1)
first_host_diag = first_host.cmd('ifconfig -a ; arp -an')
second_host_diag = second_host.cmd('ifconfig -a ; arp -an')
self.fail('%s cannot be learned (%s != %s)\nfirst host %s\nsecond host %s\n' % (
mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag))
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
all_learned_mac_ports = {}
# learn batches of hosts, then down them
for base in (10, 20, 30):
def add_macvlans(base, count):
mac_intfs = []
mac_ips = []
learned_mac_ports = {}
for i in range(base, base + count):
mac_intf = 'mac%u' % i
mac_intfs.append(mac_intf)
mac_ipv4 = '10.0.0.%u' % i
mac_ips.append(mac_ipv4)
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
macvlan_mac = self.get_mac_of_intf(mac_intf, second_host)
learned_mac_ports[macvlan_mac] = self.port_map['port_2']
return (mac_intfs, mac_ips, learned_mac_ports)
def down_macvlans(macvlans):
for macvlan in macvlans:
second_host.cmd('ip link set dev %s down' % macvlan)
def learn_then_down_hosts(base, count):
mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count)
self.verify_hosts_learned(first_host, second_host, mac_ips, learned_mac_ports)
down_macvlans(mac_intfs)
return learned_mac_ports
learned_mac_ports = learn_then_down_hosts(base, 5)
all_learned_mac_ports.update(learned_mac_ports)
# make sure at least one host still learned
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.assertTrue(learned_macs)
before_expiry_learned_macs = learned_macs
# make sure they all eventually expire
for _ in range(self.TIMEOUT * 3):
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.verify_learn_counters(
100, list(range(1, len(self.hosts_name_ordered()) + 1)))
if not learned_macs:
break
time.sleep(1)
self.assertFalse(learned_macs, msg='MACs did not expire: %s' % learned_macs)
self.assertTrue(before_expiry_learned_macs)
for mac in before_expiry_learned_macs:
self.wait_until_no_matching_flow({'eth_dst': mac}, table_id=self._ETH_DST_TABLE)
class FaucetSingleHostsNoIdleTimeoutPrometheusTest(FaucetSingleHostsTimeoutPrometheusTest):
"""Test broken reset idle timer on flow refresh workaround."""
CONFIG = """
timeout: 15
arp_neighbor_timeout: 4
nd_neighbor_timeout: 4
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
idle_dst: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest):
    # TODO: currently sized to accommodate the least capable supported hardware.
def _max_hosts():
return 512
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.254.254'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
faucet_vips: ["10.0.254.254/16"]
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
ipv4_fib: %u
""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest):
    # TODO: currently sized to accommodate the least capable supported hardware.
def _max_hosts():
return 1024
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.0.1'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
""" % (_max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
"""Test handling HUP signal without config change."""
def _configure_count_with_retry(self, expected_count):
for _ in range(3):
configure_count = self.get_configure_count()
if configure_count == expected_count:
return
time.sleep(1)
self.fail('configure count %u != expected %u' % (
configure_count, expected_count))
def test_untagged(self):
"""Test that FAUCET receives HUP signal and keeps switching."""
init_config_count = self.get_configure_count()
reload_type_vars = (
'faucet_config_reload_cold',
'faucet_config_reload_warm')
reload_vals = {}
for var in reload_type_vars:
reload_vals[var] = self.scrape_prometheus_var(
var, dpid=True, default=None)
for i in range(init_config_count, init_config_count+3):
self._configure_count_with_retry(i)
with open(self.faucet_config_path, 'a') as config_file:
config_file.write('\n')
self.verify_faucet_reconf(change_expected=False)
self._configure_count_with_retry(i+1)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_disconnections_total', dpid=True, default=None),
0)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_connections_total', dpid=True, default=None),
1)
self.wait_until_controller_flow()
self.ping_all_when_learned()
for var in reload_type_vars:
self.assertEqual(
reload_vals[var],
self.scrape_prometheus_var(var, dpid=True, default=None))
class FaucetIPv4TupleTest(FaucetTest):
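    """Test reloading successively larger exact-match IPv4 5-tuple ACL rulesets."""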
MAX_RULES = 1024
ETH_TYPE = IPV4_ETH
NET_BASE = ipaddress.IPv4Network('10.0.0.0/16')
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
table_sizes:
port_acl: 1100
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
"""
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 2048
ip_proto: 6
ipv4_dst: 127.0.0.1
ipv4_src: 127.0.0.1
tcp_dst: 65535
tcp_src: 65535
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetIPv4TupleTest, self).setUp()
self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
        with open(self.acl_config_file, 'w') as acl_config:
            acl_config.write(self.START_ACL_CONFIG)
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def _push_tuples(self, eth_type, host_ips):
max_rules = len(host_ips)
rules = 1
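        # Push successively larger rule sets (1, 2, 4, ... up to max_rules),
        # verifying each set is installed after a warm config reload.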
while rules <= max_rules:
rules_yaml = []
for rule in range(rules):
host_ip = host_ips[rule]
port = (rule + 1) % 2**16
ip_match = str(host_ip)
rule_yaml = {
'eth_type': eth_type,
'ip_proto': 6,
'tcp_src': port,
'tcp_dst': port,
'ipv%u_src' % host_ip.version: ip_match,
'ipv%u_dst' % host_ip.version: ip_match,
'actions': {'allow': 1},
}
rules_yaml.append({'rule': rule_yaml})
yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}}
tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version)
error('pushing %s' % tuple_txt)
self.reload_conf(
yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
restart=True, cold_start=False)
error('pushed %s' % tuple_txt)
self.wait_until_matching_flow(
{'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0)
rules *= 2
def test_tuples(self):
host_ips = [host_ip for host_ip in itertools.islice(
self.NET_BASE.hosts(), self.MAX_RULES)]
self._push_tuples(self.ETH_TYPE, host_ips)
class FaucetIPv6TupleTest(FaucetIPv4TupleTest):
MAX_RULES = 1024
ETH_TYPE = IPV6_ETH
NET_BASE = ipaddress.IPv6Network('fc00::00/64')
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 34525
ip_proto: 6
ipv6_dst: ::1
ipv6_src: ::1
tcp_dst: 65535
tcp_src: 65535
"""
class FaucetConfigReloadTestBase(FaucetTest):
"""Test handling HUP signal with config change."""
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
tagged_vlans: [200]
"""
ACL = """
acls:
1:
- rule:
description: "rule 1"
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
2:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 1
3:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5003
actions:
allow: 0
4:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
deny:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 0
allow:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
"""
ACL_COOKIE = None
def setUp(self): # pylint: disable=invalid-name
super(FaucetConfigReloadTestBase, self).setUp()
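        # Use a random cookie so tests can verify ACL flows carry the
        # cookie value configured by this test run.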
self.ACL_COOKIE = random.randint(1, 2**16-1)
self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE))
self.acl_config_file = '%s/acl.yaml' % self.tmpdir
with open(self.acl_config_file, 'w') as config_file:
config_file.write(self.ACL)
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
class FaucetDelPortTest(FaucetConfigReloadTestBase):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 200
"""
def test_port_down_flow_gone(self):
last_host = self.hosts_name_ordered()[-1]
self.require_host_learned(last_host)
second_host_dst_match = {'eth_dst': last_host.MAC()}
self.wait_until_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
self.change_port_config(
self.port_map['port_4'], None, None,
restart=True, cold_start=False)
self.wait_until_no_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
class FaucetConfigReloadTest(FaucetConfigReloadTestBase):
def test_add_unknown_dp(self):
conf = self._get_faucet_conf()
conf['dps']['unknown'] = {
'dp_id': int(self.rand_dpid()),
'hardware': 'Open vSwitch',
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_tabs_are_bad(self):
self.ping_all_when_learned()
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
orig_conf = self._get_faucet_conf()
self.force_faucet_reload(
'\t'.join(('tabs', 'are', 'bad')))
self.assertEqual(1, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
self.ping_all_when_learned()
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
def test_port_change_vlan(self):
first_host, second_host = self.hosts_name_ordered()[:2]
third_host, fourth_host = self.hosts_name_ordered()[2:]
self.ping_all_when_learned()
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=True)
for port_name in ('port_1', 'port_2'):
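            # 4296 == 0x1000 (OFPVID_PRESENT) | 200: the OpenFlow encoding of VLAN 200.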
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
# hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
self.verify_vlan_flood_limited(
third_host, fourth_host, first_host)
def test_port_change_acl(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
orig_conf = self._get_faucet_conf()
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, host_cache=100)
self.verify_tp_dst_notblocked(
5001, first_host, second_host, table_id=None)
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=None)
def test_port_change_perm_learn(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
self.change_port_config(
self.port_map['port_1'], 'permanent_learn', True,
restart=True, cold_start=False)
self.ping_all_when_learned(hard_timeout=0)
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.retry_net_ping(hosts=(first_host, second_host))
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
restart=True, cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetDeleteConfigReloadTest(FaucetConfigReloadTestBase):
def test_delete_interface(self):
# With all ports changed, we should cold start.
conf = self._get_faucet_conf()
del conf['dps'][self.DP_NAME]['interfaces']
conf['dps'][self.DP_NAME]['interfaces'] = {
int(self.port_map['port_1']): {
'native_vlan': '100',
'tagged_vlans': ['200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetRouterConfigReloadTest(FaucetConfigReloadTestBase):
def test_router_config_reload(self):
conf = self._get_faucet_conf()
conf['routers'] = {
'router-1': {
'vlans': ['100', '200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetConfigReloadAclTest(FaucetConfigReloadTestBase):
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acls_in: [allow]
%(port_2)d:
native_vlan: 100
acl_in: allow
%(port_3)d:
native_vlan: 100
acl_in: deny
%(port_4)d:
native_vlan: 100
acl_in: deny
"""
def _verify_hosts_learned(self, hosts):
self.pingAll()
for host in hosts:
self.require_host_learned(host)
self.assertEqual(len(hosts), self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
def test_port_acls(self):
hup = not self.STAT_RELOAD
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self._verify_hosts_learned((first_host, second_host))
self.change_port_config(
self.port_map['port_3'], 'acl_in', 'allow',
restart=True, cold_start=False, hup=hup)
self.change_port_config(
self.port_map['port_1'], 'acls_in', [3, 4, 'allow'],
restart=True, cold_start=False, hup=hup)
self.coldstart_conf(hup=hup)
self._verify_hosts_learned((first_host, second_host, third_host))
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_tp_dst_blocked(5003, first_host, second_host)
class FaucetConfigStatReloadAclTest(FaucetConfigReloadAclTest):
# Use the stat-based reload method.
STAT_RELOAD = '1'
class FaucetUntaggedBGPDualstackDefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24", "fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
for _ in range(2):
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.4.0/24 next-hop 10.0.0.254;
route 10.0.5.0/24 next-hop 10.10.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
# wait until 10.0.0.1 has been resolved
self.wait_for_route_as_flow(
first_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'))
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+10.0.4.0\/24.+cannot be us$')
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.0.3.0/24'))
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and export to BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(mininet_test_util.LOCALHOST)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes sent."""
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
# exabgp should have received our BGP updates
updates = self.exabgp_updates(self.exabgp_log)
self.assertTrue(re.search('10.0.0.0/24 next-hop 10.0.0.254', updates))
self.assertTrue(re.search('10.0.1.0/24 next-hop 10.0.0.1', updates))
self.assertTrue(re.search('10.0.2.0/24 next-hop 10.0.0.2', updates))
        self.assertTrue(re.search('10.0.3.0/24 next-hop 10.0.0.2', updates))
# test nexthop expired when port goes down
first_host = self.hosts_name_ordered()[0]
match, table = self.match_table(ipaddress.IPv4Network('10.0.0.1/32'))
ofmsg = None
for _ in range(5):
self.one_ipv4_controller_ping(first_host)
ofmsg = self.get_matching_flow(match, table_id=table)
if ofmsg:
break
time.sleep(1)
self.assertTrue(ofmsg, msg=match)
self.set_port_down(self.port_map['port_1'])
for _ in range(5):
if not self.get_matching_flow(match, table_id=table):
return
time.sleep(1)
self.fail('host route %s still present' % match)
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.ping_all_when_learned()
self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# VLAN level config to disable flooding takes precedence,
# cannot enable port-only flooding.
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
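    """Test hosts are relearned on their new ports after their MACs are swapped."""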
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.retry_net_ping(hosts=(first_host, second_host))
self.swap_host_macs(first_host, second_host)
self.ping((first_host, second_host))
for host, in_port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.require_host_learned(host, in_port=in_port)
self.retry_net_ping(hosts=(first_host, second_host))
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
permanent_learn: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.ping_all_when_learned(hard_timeout=0)
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
# 3rd host impersonates 1st but 1st host still OK
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
self.assertFalse(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_3']))
self.retry_net_ping(hosts=(first_host, second_host))
# 3rd host stops impersonating, now everything fine again.
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
class FaucetUntaggedLoopTest(FaucetTest):
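    """Test loop_protect: a loop bridged between two ports causes learn bans until it is broken."""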
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
loop_protect: True
%(port_4)d:
native_vlan: 100
loop_protect: True
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedLoopTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def total_port_bans(self):
total_bans = 0
for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED):
port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)])
total_bans += self.scrape_prometheus_var(
'port_learn_bans', port_labels, dpid=True, default=0)
return total_bans
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()
# Normal learning works
self.one_ipv4_ping(first_host, second_host.IP())
start_bans = self.total_port_bans()
# Create a loop between interfaces on second host - a veth pair,
# with two bridges, each connecting one leg of the pair to a host
# interface.
self.quiet_commands(second_host, (
'ip link add name veth-loop1 type veth peer name veth-loop2',
'ip link set veth-loop1 up',
'ip link set veth-loop2 up',
# TODO: tune for loop mitigation performance.
'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000',
'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000',
            # Bridge one leg of the veth pair to the host's first interface.
'brctl addbr br-loop1',
'brctl setfd br-loop1 0',
'ip link set br-loop1 up',
'brctl addif br-loop1 veth-loop1',
'brctl addif br-loop1 %s-eth0' % second_host.name,
            # Bridge the other leg of the veth pair to the host's second interface.
'brctl addbr br-loop2',
'brctl setfd br-loop2 0',
'ip link set br-loop2 up',
'brctl addif br-loop2 veth-loop2',
'brctl addif br-loop2 %s-eth1' % second_host.name))
# Flood some traffic into the loop
for _ in range(3):
first_host.cmd('fping -i10 -c3 10.0.0.254')
end_bans = self.total_port_bans()
if end_bans > start_bans:
return
time.sleep(1)
self.assertGreater(end_bans, start_bans)
# Break the loop, and learning should work again
self.quiet_commands(second_host, (
'ip link set veth-loop1 down',
'ip link set veth-loop2 down',))
self.one_ipv4_ping(first_host, second_host.IP())
class FaucetUntaggedIPv4LACPTest(FaucetTest):
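    """Test LACP against a Linux 802.3ad bond over two switch ports."""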
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
lacp_timeout: 3
interfaces:
%(port_1)d:
native_vlan: 100
lacp: 1
%(port_2)d:
native_vlan: 100
lacp: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedIPv4LACPTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
bond = 'bond0'
# Linux driver should have this state (0x3f/63)
#
# Actor State: 0x3f, LACP Activity, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...1 = LACP Activity: Active
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGSA]
# FAUCET should have this state (0x3e/62)
# Actor State: 0x3e, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...0 = LACP Activity: Passive
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGS*]
lag_ports = (1, 2)
synced_state_txt = r"""
Slave Interface: \S+-eth0
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
Slave Interface: \S+-eth1
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
""".strip() % tuple([self.port_map['port_%u' % i] for i in lag_ports])
lacp_timeout = 5
def prom_lag_status():
lacp_up_ports = 0
for lacp_port in lag_ports:
port_labels = self.port_labels(self.port_map['port_%u' % lacp_port])
lacp_up_ports += self.scrape_prometheus_var(
'port_lacp_status', port_labels, default=0)
return lacp_up_ports
def require_lag_status(status):
for _ in range(lacp_timeout*10):
if prom_lag_status() == status:
break
time.sleep(1)
self.assertEqual(prom_lag_status(), status)
def require_linux_bond_up():
for _retries in range(lacp_timeout*2):
result = first_host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond)
result = '\n'.join([line.rstrip() for line in result.splitlines()])
with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w') as state_file:
state_file.write(result)
if re.search(synced_state_txt, result):
break
time.sleep(1)
self.assertTrue(
re.search(synced_state_txt, result),
msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (
result, synced_state_txt))
self.assertEqual(0, prom_lag_status())
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
# Deconfigure bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member))
# Configure bond interface
self.quiet_commands(first_host, (
('ip link add %s address 0e:00:00:00:00:99 '
'type bond mode 802.3ad lacp_rate fast miimon 100') % bond,
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond))
# Add bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set dev %s master %s' % (bond_member, bond),))
for _flaps in range(2):
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
require_lag_status(2)
require_linux_bond_up()
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_status(0)
class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest):
"""Ensure remote LACP system ID mismatch is logged."""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
for i, bond_member in enumerate(bond_members):
bond = 'bond%u' % i
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member,
('ip link add %s address 0e:00:00:00:00:%2.2x '
'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i*2+i),
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond,
'ip link set dev %s master %s' % (bond_member, bond)))
log_file = os.path.join(self.tmpdir, 'faucet.log')
self.wait_until_matching_lines_from_file(r'.+actor system mismatch.+', log_file)
class FaucetUntaggedIPv4ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_ping_fragment_controller(self):
first_host = self.hosts_name_ordered()[0]
first_host.cmd('ping -s 1476 -c 3 %s' % self.FAUCET_VIPV4.ip)
self.one_ipv4_controller_ping(first_host)
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
packets = 1000
fuzz_template = 'python3 -c \"from scapy.all import * ; scapy.all.send(%s, count=%u)\"'
for fuzz_cmd in (
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=0))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=8))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)):
fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180))
self.assertTrue(
re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % (
fuzz_cmd, fuzz_out))
self.one_ipv4_controller_ping(first_host)
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(5):
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.flap_all_switch_ports()
class FaucetUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
self.verify_controller_fping(first_host, self.FAUCET_VIPV4)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
FAUCET_MAC = "0e:00:00:00:00:99"
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC
CONFIG = """
advertise_interval: 5
""" + CONFIG_BOILER_UNTAGGED
def test_ndisc6(self):
first_host = self.hosts_name_ordered()[0]
for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
self.assertEqual(
self.FAUCET_MAC.upper(),
first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())
def test_rdisc6(self):
first_host = self.hosts_name_ordered()[0]
rdisc6_results = sorted(list(set(first_host.cmd(
'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
self.assertEqual(
['fc00::1:0/112', 'fc00::2:0/112'],
rdisc6_results)
def test_ra_advertise(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether dst 33:33:00:00:00:01',
'ether src %s' % self.FAUCET_MAC,
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'ethertype IPv6 \(0x86dd\), length 142',
r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s' % (ra_required, tcpdump_txt))
def test_rs_reply(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether src %s' % self.FAUCET_MAC,
'ether dst %s' % first_host.MAC(),
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd(
'rdisc6 -1 %s' % first_host.defaultIntf())],
timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
for _ in range(5):
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.flap_all_switch_ports()
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
fuzz_success = False
packets = 1000
count = 0
abort = False
def note(*args):
"Add a message to the log"
error('%s:' % self._test_name(), *args + tuple('\n'))
        # Some of these tests have been slowing down and timing out, so this
        # code is intended to allow some debugging and analysis.
for fuzz_class in dir(scapy.all):
if fuzz_class.startswith('ICMPv6'):
fuzz_cmd = ("from scapy.all import * ;"
"scapy.all.send(IPv6(dst='%s')/fuzz(%s()),count=%u)" %
(self.FAUCET_VIPV6.ip, fuzz_class, packets))
out, start, too_long = '', time.time(), 30 # seconds
popen = first_host.popen('python3', '-c', fuzz_cmd)
for _, line in pmonitor({first_host: popen}):
out += line
if time.time() - start > too_long:
note('stopping', fuzz_class, 'after >', too_long, 'seconds')
note('output was:', out)
popen.terminate()
abort = True
break
popen.wait()
if 'Sent %u packets' % packets in out:
count += packets
elapsed = time.time() - start
note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed)
fuzz_success = True
if abort:
break
note('successfully sent', count, 'packets')
self.assertTrue(fuzz_success)
note('pinging', first_host)
self.one_ipv6_controller_ping(first_host)
note('test_fuzz_controller() complete')
class FaucetUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
self.verify_controller_fping(first_host, self.FAUCET_VIPV6)
class FaucetTaggedAndUntaggedDiffVlanTest(FaucetTest):
N_TAGGED = 2
N_UNTAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
native_vlan: 101
%(port_4)d:
native_vlan: 101
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedDiffVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=2, n_untagged=2, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_separate_untagged_tagged(self):
tagged_host_pair = self.hosts_name_ordered()[:2]
untagged_host_pair = self.hosts_name_ordered()[2:]
self.verify_vlan_flood_limited(
tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
self.verify_vlan_flood_limited(
untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
# hosts within VLANs can ping each other
self.retry_net_ping(hosts=tagged_host_pair)
self.retry_net_ping(hosts=untagged_host_pair)
# hosts cannot ping hosts in other VLANs
self.assertEqual(
100, self.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedEgressACLTest(FaucetUntaggedTest):
REQUIRES_METADATA = True
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acl_out: 1
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
self.ping_all_when_learned()
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
def test_port5002_notblocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=egress_acl_table)
class FaucetUntaggedDPACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
dp_acls: [1]
""" + CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedNoReconfACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
opstatus_reconf: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
matches = {
'in_port': int(self.port_map['port_1']),
'tcp_dst': 5001,
'eth_type': IPV4_ETH,
'ip_proto': 6}
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_down(self.port_map['port_1'])
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_up(self.port_map['port_1'])
self.ping_all_when_learned()
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
            # Match any TCP destination port with the 1024 bit set (1024-2047, 3072-4095, ...).
tcp_dst: 1024/1024
actions:
allow: 0
- rule:
actions:
allow: 1
"""
def test_port_gt1023_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
actions:
allow: 1
vlans:
100:
description: "untagged"
acl_in: 1
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
class FaucetUntaggedOutputOnlyTest(FaucetUntaggedTest):
CONFIG = """
interfaces:
%(port_1)d:
output_only: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1'])},
table_id=self._VLAN_TABLE,
actions=[])
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertEqual(100.0, self.ping((first_host, second_host)))
self.assertEqual(0, self.ping((third_host, second_host)))
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
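"""Test that an ACL mirror action mirrors allowed traffic to another port."""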
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
def test_eapol_mirrored(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLOutputMirrorTest(FaucetUntaggedTest):
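"""Test mirroring implemented via an ACL output action with a ports list."""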
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
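"""Test that an ACL with only a mirror action still mirrors and forwards traffic."""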
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetMultiOutputTest(FaucetUntaggedTest):
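"""Test an ACL output action copying packets to multiple ports on different VLANs."""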
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
tcpdump_filter = 'icmp'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd('ping -c1 %s' % third_host.IP())])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd('ping -c1 %s' % fourth_host.IP())])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
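"""Test an ACL output action that rewrites eth_dst and pushes a VLAN tag."""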
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
vlan_vid: 123
set_fields:
- eth_dst: "06:06:06:06:06:06"
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
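"""Test an ACL output action that pushes multiple VLAN tags."""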
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [123, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiConfVlansOutputTest(FaucetUntaggedTest):
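"""Test an ACL output action pushing VLAN tags with a QinQ ethertype."""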
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'ether proto 0x88a8'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
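"""Test mirroring of an untagged port, without degrading forwarding performance."""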
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetUntaggedMultiMirrorTest(FaucetUntaggedTest):
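"""Test one port mirroring multiple ports, configured via a config reload."""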
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
output_only: True
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
ping_pairs = (
(first_host, second_host),
(second_host, first_host))
self.flap_all_switch_ports()
self.change_port_config(
self.port_map['port_3'], 'mirror',
[self.port_map['port_1'], self.port_map['port_2']],
restart=True, cold_start=False, hup=True)
self.verify_ping_mirrored_multi(
ping_pairs, mirror_host, both_mirrored=True)
class FaucetUntaggedMultiMirrorSepTest(FaucetUntaggedTest):
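"""Test that two ports can independently mirror the same port."""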
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
mirror: %(port_1)d
%(port_4)d:
mirror: %(port_1)d
"""
def test_untagged(self):
self.flap_all_switch_ports()
# Make sure both mirror ports receive traffic mirrored from port 1
first_host, second_host = self.hosts_name_ordered()[0:2]
mirror_host = self.hosts_name_ordered()[2]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
mirror_host = self.hosts_name_ordered()[3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
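"""Base test case with four tagged hosts on one VLAN."""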
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = CONFIG_TAGGED_BOILER
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=4, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_tagged(self):
self.ping_all_when_learned()
class FaucetTaggedMirrorTest(FaucetTaggedTest):
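"""Test mirroring of a tagged port, without degrading forwarding performance."""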
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetTaggedVLANPCPTest(FaucetTaggedTest):
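"""Test ACL matching of VLAN PCP and rewriting it via set_fields."""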
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetMPLSTest(FaucetUntaggedTest):
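"""Test ACL actions that push and pop MPLS labels."""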
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
mpls:
# test MPLS push entry
- rule:
dl_type: 0x800 # ipv4
actions:
output:
mpls_label: 55
port: %(port_2)d
# test MPLS pop entry
- rule:
dl_type: 0x8847 # mpls
mpls_label: 50
mpls_bos: 1
actions:
output:
pop_mpls: 0x800
port: %(port_2)d
# test MPLS push entry (push a second label)
- rule:
dl_type: 0x8847 # mpls
mpls_label: 51
mpls_bos: 1
actions:
output:
mpls_label: 52
port: %(port_2)d
# test MPLS pop entry (pop outer label)
- rule:
dl_type: 0x8847 # mpls
mpls_label: 53
mpls_bos: 0
actions:
output:
pop_mpls: 0x8847
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: mpls
%(port_2)d:
native_vlan: 100
acl_in: mpls
%(port_3)d:
native_vlan: 100
"""
def test_untagged(self):
command_template = ('python3 -c \"from scapy.all import * ;'
' load_contrib(\'mpls\'); sendp(%s, iface=\'%s\', count=%u)"')
# test incoming IPv4 packet has MPLS header pushed on
hello_template = (
'Ether(src=\'%s\', dst=\'%s\')/'
'IP(src=\'%s\', dst=\'%s\')/'
'UDP(dport=68)/'
'b\'payload\'')
hostone, hosttwo = self.net.hosts[0], self.net.hosts[1]
tcpdump_filter = 'mpls'
tcpdump_txt = self.tcpdump_helper(
hosttwo, tcpdump_filter, [
partial(hostone.cmd, (command_template % (
hello_template % (hostone.MAC(), hosttwo.MAC(), hostone.IP(), hosttwo.IP()),
hostone.defaultIntf(), 1)))], timeout=5, vflags='-vv -A', packets=1)
self.assertTrue(re.search('MPLS', tcpdump_txt))
# test incoming packet MPLS header is popped off
hello_template = (
'Ether(src=\'%s\', dst=\'%s\',type=0x8847)/'
'MPLS(label=50, s=1, ttl=255)/'
'IP(src=\'%s\', dst=\'%s\')/'
'UDP(dport=68)/'
'b\'payload\'')
hostone, hosttwo = self.net.hosts[0], self.net.hosts[1]
tcpdump_filter = 'udp'
tcpdump_txt = self.tcpdump_helper(
hosttwo, tcpdump_filter, [
partial(hostone.cmd, (command_template % (
hello_template % (hostone.MAC(), hosttwo.MAC(), hostone.IP(), hosttwo.IP()),
hostone.defaultIntf(), 1)))], timeout=5, vflags='-vv -A', packets=1)
self.assertFalse(re.search('MPLS', tcpdump_txt))
# test incoming packet has second MPLS label pushed on
hello_template = (
'Ether(src=\'%s\', dst=\'%s\',type=0x8847)/'
'MPLS(label=51, s=1, ttl=255)/'
'IP(src=\'%s\', dst=\'%s\')/'
'UDP(dport=68)/'
'b\'payload\'')
hostone, hosttwo = self.net.hosts[0], self.net.hosts[1]
tcpdump_filter = 'mpls'
tcpdump_txt = self.tcpdump_helper(
hosttwo, tcpdump_filter, [
partial(hostone.cmd, (command_template % (
hello_template % (hostone.MAC(), hosttwo.MAC(), hostone.IP(), hosttwo.IP()),
hostone.defaultIntf(), 1)))], timeout=5, vflags='-vv -A', packets=1)
self.assertTrue(re.search(r"MPLS \(label 52", tcpdump_txt))
# test incoming packet outer MPLS header is popped off
hello_template = (
'Ether(src=\'%s\', dst=\'%s\',type=0x8847)/'
'MPLS(label=53, s=0, ttl=255)/'
'MPLS(label=51, s=1, ttl=255)/'
'IP(src=\'%s\', dst=\'%s\')/'
'UDP(dport=68)/'
'b\'payload\'')
hostone, hosttwo = self.net.hosts[0], self.net.hosts[1]
tcpdump_filter = 'mpls'
tcpdump_txt = self.tcpdump_helper(
hosttwo, tcpdump_filter, [
partial(hostone.cmd, (command_template % (
hello_template % (hostone.MAC(), hosttwo.MAC(), hostone.IP(), hosttwo.IP()),
hostone.defaultIntf(), 1)))], timeout=5, vflags='-vv -A', packets=1)
self.assertFalse(re.search(r"MPLS \(label 53", tcpdump_txt))
class FaucetTaggedGlobalIPv4RouteTest(FaucetTaggedTest):
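"""Test IPv4 routing between many VLANs via a global routing VLAN."""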
def _vids():
return list(range(100, 148))
def global_vid():
return 2047
STATIC_GW = False
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
NETNS = True
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
@staticmethod
def netbase(vid, host):
return ipaddress.ip_interface('192.168.%u.%u' % (vid, host))
@staticmethod
def fping(macvlan_int, ipg):
return 'fping -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (macvlan_int, ipg)
def fib_table(self):
return self._IPV4_FIB_TABLE
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv4_ping(host, ipa, intf=macvlan_int)
def ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["192.168.%u.254/24"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v4: True
max_wildcard_table_size: 1024
table_sizes:
vlan: %u
vip: %u
flood: %u
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(),
len(STR_VIDS) * 3, # VLAN
len(STR_VIDS) * 2, # VIP
len(STR_VIDS) * 12, # Flood
'%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
hosts = (first_host, second_host)
required_ipds = set()
ipd_to_macvlan = {}
for i, host in enumerate(hosts, start=1):
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
macvlan_int = 'macvlan%u' % vid
ipa = self.netbase(vid, i)
ipg = self.netbase(vid, 254)
ipd = self.netbase(vid, 253)
required_ipds.add(str(ipd.ip))
ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host)
setup_commands.extend([
self.ip('link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid)),
self.ip('link set dev %s up' % vlan_int),
self.ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)),
self.ip('link set dev %s up' % macvlan_int),
self.ip('address add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)),
self.ip('route add default via %s table %u' % (ipg.ip, vid)),
self.ip('rule add from %s table %u priority 100' % (ipa, vid)),
# stimulate learning attempts for down host.
self.ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))])
# each host routes via FAUCET to the other host in the same connected subnet,
# to cause routing (rather than direct switching) to be exercised.
for j, _ in enumerate(hosts, start=1):
if j != i:
other_ip = self.netbase(vid, j)
setup_commands.append(
self.ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid)))
if self.STATIC_GW:
setup_commands.append(
self.ip('neigh add %s lladdr %s dev %s' % (ipg.ip, self.FAUCET_MAC, macvlan_int)))
else:
setup_commands.append(
self.fping(macvlan_int, ipg.ip))
setup_commands.append(self.fping(macvlan_int, ipd.ip))
self.quiet_commands(host, setup_commands)
# verify drop rules present for down hosts
for _ in range(10):
drop_rules = self.get_matching_flows_on_dpid(
self.dpid, {'dl_type': self.ETH_TYPE, 'dl_vlan': str(self.GLOBAL_VID)},
table_id=self.fib_table(), actions=[])
if drop_rules:
for drop_rule in drop_rules:
match = drop_rule['match']
del match['dl_type']
del match['dl_vlan']
self.assertEqual(1, len(match))
ipd = list(match.values())[0].split('/')[0]
if ipd in required_ipds:
required_ipds.remove(ipd)
if not required_ipds:
break
for ipd in required_ipds:
macvlan_int, host = ipd_to_macvlan[ipd]
host.cmd(self.fping(macvlan_int, ipd))
time.sleep(1)
self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds)
# verify routing performance
for first_host_ip, second_host_ip in (
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[0], 2)),
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[-1], 2)),
(self.netbase(self.NEW_VIDS[-1], 1), self.netbase(self.NEW_VIDS[0], 2))):
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.scapy_bcast(first_host))
# verify L3 reachability between hosts within each subnet
for vid in self.NEW_VIDS:
macvlan_int = 'macvlan%u' % vid
first_host_ip = self.netbase(vid, 1)
second_host_ip = self.netbase(vid, 2)
self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int)
self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int)
# verify L3 hairpin reachability
macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0]
macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1]
macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1)
macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254)
macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
[self.ip('link set %s netns %s' % (macvlan2_int, netns))])
for exec_cmd in (
(self.ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)),
self.ip('link set %s up' % macvlan2_int),
self.ip('route add default via %s' % macvlan2_gw.ip))):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
setup_cmds.append(
self.ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip)))
self.quiet_commands(first_host, setup_cmds)
self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int)
# Verify mirror.
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedGlobalIPv6RouteTest(FaucetTaggedGlobalIPv4RouteTest):
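"""Test IPv6 routing between VLANs via a global routing VLAN."""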
STATIC_GW = True
IPV = 6
NETPREFIX = 112
ETH_TYPE = IPV6_ETH
def _vids():
return list(range(100, 103))
def global_vid():
return 2047
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
def netbase(self, vid, host):
return ipaddress.ip_interface('fc00::%u:%u' % (vid, host))
def fib_table(self):
return self._IPV6_FIB_TABLE
def fping(self, macvlan_int, ipg):
return 'fping6 -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (macvlan_int, ipg)
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv6_ping(host, ipa, intf=macvlan_int, timeout=2)
def ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["fc00::%u:254/112"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v6: True
max_wildcard_table_size: 512
table_sizes:
vlan: 256
vip: 128
flood: 384
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
class FaucetTaggedScaleTest(FaucetTaggedTest):
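"""Test host learning across many tagged VLANs on each port."""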
def _vids():
return list(range(100, 148))
VIDS = _vids()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
CONFIG_GLOBAL = """
vlans:
""" + '\n'.join(['\n'.join(
(' %u:',
' description: "tagged"')) % i for i in VIDS])
CONFIG = """
interfaces:
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
""" % ('%(port_1)d', ','.join(STR_VIDS),
'%(port_2)d', ','.join(STR_VIDS),
'%(port_3)d', ','.join(STR_VIDS),
'%(port_4)d', ','.join(STR_VIDS))
def test_tagged(self):
self.ping_all_when_learned()
for host in self.hosts_name_ordered():
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
setup_commands.extend([
'ip link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid),
'ip link set dev %s up' % vlan_int])
self.quiet_commands(host, setup_commands)
for host in self.hosts_name_ordered():
rdisc6_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
rdisc6_commands.append(
'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int)
self.quiet_commands(host, rdisc6_commands)
for vlan in self.NEW_VIDS:
for _ in range(3):
for host in self.hosts_name_ordered():
# use this host's VLAN interface for the current VLAN.
vlan_int = '%s.%u' % (host.intf_root_name, vlan)
self.quiet_commands(
host,
['rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int])
vlan_hosts_learned = self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': str(vlan)})
if vlan_hosts_learned == len(self.hosts_name_ordered()):
break
time.sleep(1)
self.assertGreater(
vlan_hosts_learned, 1,
msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned))
class FaucetTaggedBroadcastTest(FaucetTaggedTest):
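"""Test broadcast is flooded to tagged ports but not back to the sender."""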
def test_tagged(self):
super(FaucetTaggedBroadcastTest, self).test_tagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedExtLoopProtectTest(FaucetTaggedTest):
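"""Test loop_protect_external blocks flooding between external ports."""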
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_2)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
ext_port1, ext_port2, int_port1, int_port2 = self.hosts_name_ordered()
self.verify_broadcast((ext_port1, ext_port2), False)
self.verify_broadcast((int_port1, int_port2), True)
self.verify_unicast((int_port1, int_port2), True)
class FaucetTaggedWithUntaggedTest(FaucetTaggedTest):
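"""Test ports carrying a native VLAN and a tagged VLAN at the same time."""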
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 200
tagged_vlans: [100]
%(port_2)d:
native_vlan: 200
tagged_vlans: [100]
%(port_3)d:
native_vlan: 200
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
tagged_vlans: [100]
"""
def test_tagged(self):
self.ping_all_when_learned()
native_ips = [
ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))]
for native_ip, host in zip(native_ips, self.hosts_name_ordered()):
self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name)
for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()):
for native_ip in native_ips:
if native_ip != own_native_ip:
self.one_ipv4_ping(host, native_ip.ip, intf=host.intf_root_name)
class FaucetTaggedSwapVidMirrorTest(FaucetTaggedTest):
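"""Test that mirroring preserves the original VID when an ACL swaps it."""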
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())], root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
# Saw swapped VID on second host
test_acl(second_host, 'vlan 101')
# Saw original VID on mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
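"""Test an ACL output action that swaps the VLAN VID."""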
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
swap_vid: 101
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the swapped VLAN VID
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd('ping -c1 %s' % second_host.IP())], root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
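"""Test an ACL output action that pops the VLAN tag."""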
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
pop_vlans: 1
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
'ping -c1 %s' % second_host.IP())], packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
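"""Test tagged hosts can ping the FAUCET IPv4 VIP."""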
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
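"""Test tagged hosts can ping the FAUCET IPv6 VIP."""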
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
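"""Test an ACL matching ICMPv6 neighbor solicitation for a specific target."""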
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
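"""Test IPv4 routing with static routes on a tagged VLAN."""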
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
200:
description: "not used"
300:
description: "not used"
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
"""
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
for _coldstart in range(2):
for _swaps in range(3):
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
# changing a VLAN/port not involved in routing should be a warm start.
for vid in (300, 200):
self.change_port_config(
self.port_map['port_4'], 'native_vlan', vid,
restart=True, cold_start=False)
class FaucetTaggedTargetedResolutionIPv4RouteTest(FaucetTaggedIPv4RouteTest):
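"""As FaucetTaggedIPv4RouteTest, but with targeted_gw_resolution enabled."""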
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
targeted_gw_resolution: True
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
"""
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
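"""Test proactive IPv4 neighbor resolution on a tagged VLAN."""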
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
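"""Test proactive IPv6 neighbor resolution on a tagged VLAN."""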
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:3/64"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('fc00::1:99/64')
faucet_vip_ip = ipaddress.ip_interface('fc00::1:3/126')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, ipaddress.ip_interface('fc00::1:1/64'))
# We use a narrower mask to force second_host to use the /128 route,
# since otherwise it would realize :99 is directly connected via ND and send directly.
self.add_host_ipv6_address(second_host, ipaddress.ip_interface('fc00::1:2/126'))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
1)
class FaucetUntaggedIPv4GlobalInterVLANRouteTest(FaucetUntaggedTest):
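"""Test IPv4 inter-VLAN routing via a global VLAN, with routes learned from BGP."""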
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
200:
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC2 + """
routers:
global:
vlans: [100, 200]
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
global_vlan: 300
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 200
"""
exabgp_peer_conf = """
static {
route 10.99.99.0/24 next-hop 10.200.0.1 local-preference 100;
route 10.0.5.0/24 next-hop 127.0.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'), vlan_vid=300)
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
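"""Test IPv4 inter-VLAN routing, including changing a routed VLAN's VID."""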
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
for vlanb_vid in (300, 200):
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.change_vlan_config(
'vlanb', 'vid', vlanb_vid, restart=True, cold_start=True)
class FaucetUntaggedPortSwapIPv4InterVLANRouteTest(FaucetUntaggedTest):
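"""Test IPv4 inter-VLAN routing survives deleting and replacing a routed port."""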
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
vlana:
vid: 100
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [vlana, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: vlana
%(port_2)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
# Delete port 2
self.change_port_config(
self.port_map['port_2'], None, None,
restart=False, cold_start=False)
# Add port 3
self.add_port_config(
self.port_map['port_3'], {'native_vlan': 'vlanb'},
restart=True, cold_start=True)
third_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(third_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(third_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(third_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
class FaucetUntaggedExpireIPv4InterVLANRouteTest(FaucetUntaggedTest):
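"""Test that routes to a dead nexthop expire and recover when the host returns."""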
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
max_host_fib_retry_count: 2
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name)
log_file = os.path.join(self.tmpdir, 'faucet.log')
expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip
self.wait_until_matching_lines_from_file(expired_re, log_file)
second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
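"""Test IPv6 inter-VLAN routing."""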
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["fc00::1:254/112", "fe80::1:254/112"]
vlanb:
vid: 200
faucet_vips: ["fc01::1:254/112", "fe80::2:254/112"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.add_host_ipv6_address(second_host, second_host_net)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
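"""Test policy-based IPv4 routing, using ACL swap_vid to select the routing VLAN."""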
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3
# we want to route 10.99.0.0/24 to b2, but we want
# to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
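"""Test routing between two IPv4 subnets on the same VLAN."""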
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('10.0.0.1/24')
second_host_net = ipaddress.ip_interface('172.16.0.1/24')
second_host.setIP(str(second_host_net.ip), prefixLen=24)
self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV4.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
self.one_ipv4_ping(first_host, second_host_net.ip)
self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
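"""Test routing between two IPv6 subnets on the same VLAN."""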
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112", "fc01::1:254/112"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
self.add_host_ipv6_address(second_host, second_host_net)
self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
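"""Test an IPv6 default route learned from a BGP peer."""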
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route ::/0 next-hop fc00::1:1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
first_host_alias_ip = ipaddress.ip_interface('fc00::50:1/112')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.one_ipv6_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
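"""Test IPv6 routes learned from a BGP peer."""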
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route fc00::10:0/112 next-hop fc00::1:1 local-preference 100;
route fc00::20:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::30:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::40:0/112 next-hop fc00::1:254;
route fc00::50:0/112 next-hop fc00::2:2;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+fc00::40:0\/112.+cannot be us$')
self.verify_ipv6_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv6_routing_mesh()
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
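"""Test IPv6 routes whose gateways are inside the routed destination subnets."""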
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::10:2"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::20:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::10:2/112')
first_host_ctrl_ip = ipaddress.ip_address('fc00::10:1')
second_host_ip = ipaddress.ip_interface('fc00::20:2/112')
second_host_ctrl_ip = ipaddress.ip_address('fc00::20:1')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_route(
first_host, second_host_ip, first_host_ctrl_ip)
self.add_host_route(
second_host, first_host_ip, second_host_ctrl_ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_ip.network)
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(first_host, second_host_ctrl_ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
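"""Test static IPv6 routes are exported to a BGP peer."""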
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
- route:
ip_dst: "fc00::30:0/112"
ip_gw: "fc00::1:2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1')
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.verify_ipv6_routing_mesh()
second_host = self.hosts_name_ordered()[1]
self.flap_all_switch_ports()
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv6Network('fc00::30:0/112'))
self.verify_ipv6_routing_mesh()
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
updates = self.exabgp_updates(self.exabgp_log)
self.assertTrue(re.search('fc00::1:0/112 next-hop fc00::1:254', updates))
self.assertTrue(re.search('fc00::10:0/112 next-hop fc00::1:1', updates))
self.assertTrue(re.search('fc00::20:0/112 next-hop fc00::1:2', updates))
self.assertTrue(re.search('fc00::30:0/112 next-hop fc00::1:2', updates))
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
"""Test basic IPv6 routing without BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
"""Test IPv6 routing works."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
for _coldstart in range(2):
for _swaps in range(5):
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
class FaucetStringOfDPTest(FaucetTest):
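"""Base class for tests of multiple datapaths connected in a string topology."""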
MAX_HOSTS = 4
NUM_HOSTS = 4
LINKS_PER_HOST = 1
VID = 100
CONFIG = None
GROUP_TABLE = False
dpids = None
topo = None
def non_host_links(self, dpid):
return self.topo.dpid_peer_links(dpid)
@staticmethod
def get_config_header(_config_global, _debug_log, _dpid, _hardware):
"""Don't generate standard config file header."""
return ''
def acls(self):
return {}
def acl_in_dp(self):
return {}
def build_net(self, stack=False, n_dps=1,
n_tagged=0, tagged_vid=100,
untagged_hosts=None,
include=None, include_optional=None,
switch_to_switch_links=1, hw_dpid=None,
stack_ring=False, lacp=False, use_external=False,
router=None, dp_options=None):
"""Set up Mininet and Faucet for the given topology."""
if include is None:
include = []
if include_optional is None:
include_optional = []
self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]
self.dpids[0] = self.dpid
self.topo = mininet_test_topo.FaucetStringOfDPSwitchTopo(
self.OVS_TYPE,
self.ports_sock,
dpids=self.dpids,
n_tagged=n_tagged,
tagged_vid=tagged_vid,
untagged_hosts=untagged_hosts,
links_per_host=self.LINKS_PER_HOST,
switch_to_switch_links=switch_to_switch_links,
test_name=self._test_name(),
hw_dpid=hw_dpid, switch_map=self.switch_map,
stack_ring=stack_ring,
port_order=self.port_order
)
self.port_maps = {dpid: self.create_port_map(dpid) for dpid in self.dpids}
self.port_map = self.port_maps[self.dpid]
self.CONFIG = self.get_config(
self.dpids,
hw_dpid,
stack,
self.hardware,
self.debug_log_path,
n_tagged,
tagged_vid,
untagged_hosts,
include,
include_optional,
self.acls(),
self.acl_in_dp(),
lacp,
use_external,
router,
dp_options
)
def get_config(self, dpids=None, hw_dpid=None, stack=False, hardware=None, ofchannel_log=None,
n_tagged=0, tagged_vid=0, untagged_hosts=None,
include=None, include_optional=None, acls=None, acl_in_dp=None,
lacp=False, use_external=False, router=None, dp_options=None):
"""Build a complete Faucet configuration for each datapath, using the given topology."""
if dpids is None:
dpids = []
if include is None:
include = []
if include_optional is None:
include_optional = []
if acls is None:
acls = {}
if acl_in_dp is None:
acl_in_dp = {}
dpid_names = {}
dpname_to_dpkey = {}
def dp_name(i):
return 'faucet-%i' % (i + 1)
def add_vlans(n_tagged, tagged_vid, untagged_hosts, router):
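"""Return the VLAN config for tagged/untagged hosts, including any router VIPs/MACs."""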
vlans_config = {}
if untagged_hosts:
for vid in untagged_hosts.keys():
vlans_config[vid] = {
'description': 'untagged',
}
if ((n_tagged and not untagged_hosts) or
(n_tagged and untagged_hosts and tagged_vid not in untagged_hosts)):
vlans_config[tagged_vid] = {
'description': 'tagged',
}
if router:
for vid in router.keys():
if vid in vlans_config:
if 'faucet_mac' in router[vid]:
vlans_config[vid]['faucet_mac'] = router[vid]['faucet_mac']
if 'faucet_vips' in router[vid]:
vlans_config[vid]['faucet_vips'] = router[vid]['faucet_vips']
return vlans_config
def add_router(router):
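"""Return the routers config for the given VLANs, if any."""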
router_config = {}
if router:
router_config['router-1'] = {
'vlans': list(router.keys()),
}
return router_config
def add_acl_to_port(name, port, interfaces_config):
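"""Apply any configured acl_in for this DP/port."""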
if name in acl_in_dp and port in acl_in_dp[name]:
interfaces_config[port]['acl_in'] = acl_in_dp[name][port]
def add_dp_to_dp_ports(name, dpid, dp_config, interfaces_config, stack,
n_tagged, tagged_vid, untagged_hosts):
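"""Configure switch-to-switch links as stack or trunk ports."""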
for link in self.topo.dpid_peer_links(dpid):
port, peer_dpid, peer_port = link.port, link.peer_dpid, link.peer_port
interfaces_config[port] = {}
if stack:
# make this a stacking link.
interfaces_config[port].update(
{
'stack': {
'dp': dpid_names[peer_dpid],
'port': peer_port}
})
else:
# not a stack - make this a trunk.
tagged_vlans = []
if n_tagged:
tagged_vlans.append(tagged_vid)
if untagged_hosts:
for vid in untagged_hosts.keys():
if vid not in tagged_vlans:
tagged_vlans.append(vid)
if tagged_vlans:
interfaces_config[port]['tagged_vlans'] = tagged_vlans
if lacp:
interfaces_config[port].update(
{'lacp': 1, 'lacp_active': True})
add_acl_to_port(name, port, interfaces_config)
# TODO: make per test configurable
dp_config['lacp_timeout'] = 10
# TODO: make the stacking root configurable
first_dp = dpid == self.dpid
if stack and first_dp:
dp_config['stack'] = {
'priority': 1
}
def add_dp(name, dpid, hw_dpid, i, stack,
n_tagged, tagged_vid, untagged_hosts,
use_external, dp_options):
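"""Return the config dict for a single datapath."""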
dp_config = {
'dp_id': int(dpid),
'hardware': hardware if dpid == hw_dpid else 'Open vSwitch',
'table_sizes': {'flood': 64},
'ofchannel_log': ofchannel_log + str(i) if ofchannel_log else None,
'interfaces': {},
'group_table': self.GROUP_TABLE,
}
interfaces_config = {}
index = 1
for n_port in range(n_tagged):
port = self.port_maps[dpid]['port_%d' % index]
interfaces_config[port] = {
'tagged_vlans': [tagged_vid],
'loop_protect_external': (use_external and n_port != n_tagged - 1),
}
add_acl_to_port(name, port, interfaces_config)
index += 1
if untagged_hosts:
n_port = 0
for vid, num_hosts in untagged_hosts.items():
for _ in range(num_hosts):
port = self.port_maps[dpid]['port_%d' % index]
interfaces_config[port] = {
'native_vlan': vid,
'loop_protect_external': (use_external and n_port != num_hosts - 1),
}
add_acl_to_port(name, port, interfaces_config)
index += 1
n_port += 1
add_dp_to_dp_ports(name, dpid, dp_config, interfaces_config, stack,
n_tagged, tagged_vid, untagged_hosts)
for portno, config in list(interfaces_config.items()):
stack = config.get('stack', None)
if stack and 'stack' in interfaces_config[portno]:
peer_portno = stack['port']
interfaces_config[portno]['stack'].update({
'port': 'b%u' % peer_portno})
dp_config['interfaces'] = interfaces_config
if dp_options:
for key, value in dp_options.items():
dp_config[key] = value
return dp_config
### Create config
config = {'version': 2}
if include:
config['include'] = list(include)
if include_optional:
config['include-optional'] = list(include_optional)
config['vlans'] = add_vlans(n_tagged, tagged_vid, untagged_hosts, router)
if router:
config['routers'] = add_router(router)
config['acls'] = acls.copy()
config['dps'] = {}
for i, dpid in enumerate(dpids):
dpid_names[dpid] = name = dp_name(i)
dpname_to_dpkey[name] = dpid
for i, dpid in enumerate(dpids):
name = dpid_names[dpid]
config['dps'][name] = add_dp(
name, dpid, hw_dpid, i, stack,
n_tagged, tagged_vid, untagged_hosts,
use_external, dp_options)
config_text = yaml.dump(config, default_flow_style=False)
return config_text
def verify_no_cable_errors(self):
i = 0
for dpid in self.dpids:
i += 1
labels = {'dp_id': '0x%x' % int(dpid), 'dp_name': 'faucet-%u' % i}
self.assertEqual(
0, self.scrape_prometheus_var(
var='stack_cabling_errors_total', labels=labels, default=None))
self.assertGreater(
self.scrape_prometheus_var(
var='stack_probes_received_total', labels=labels), 0)
def verify_stack_hosts(self, verify_bridge_local_rule=True, retries=3):
lldp_cap_files = []
for host in self.hosts_name_ordered():
lldp_cap_file = os.path.join(self.tmpdir, '%s-lldp.cap' % host)
lldp_cap_files.append(lldp_cap_file)
host.cmd(mininet_test_util.timeout_cmd(
'tcpdump -U -n -c 1 -i %s -w %s ether proto 0x88CC and not ether src %s &' % (
                    host.defaultIntf(), lldp_cap_file, host.MAC()), 60))
# should not flood LLDP from hosts
self.verify_lldp_blocked(self.hosts_name_ordered())
# hosts should see no LLDP probes
self.verify_empty_caps(lldp_cap_files)
if verify_bridge_local_rule:
# Verify 802.1x flood block triggered.
for dpid in self.dpids:
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
dpid=dpid, table_id=self._FLOOD_TABLE, ofa_match=False)
self.retry_net_ping(retries=retries)
def stack_port_status(self, dpid, dp_name, port_no):
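        """Return the Prometheus port_stack_state value for a stack port (as used in these tests: 3 == up, 2 == down)."""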
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
return self.scrape_prometheus_var(
'port_stack_state', labels=labels,
default=None, dpid=False)
def wait_for_stack_port_status(self, dpid, dp_name, port_no, status, timeout=25):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
if not self.wait_for_prometheus_var(
'port_stack_state', status, labels=labels,
default=None, dpid=False, timeout=timeout):
self.fail('did not get expected dpid %x port %u port_stack_state %u' % (
int(dpid), port_no, status))
def verify_stack_up(self, prop=1.0, timeout=25):
for _ in range(timeout):
links = 0
links_up = 0
for i, dpid in enumerate(self.dpids, start=1):
dp_name = 'faucet-%u' % i
for link in self.non_host_links(dpid):
status = self.stack_port_status(dpid, dp_name, link.port)
links += 1
if status == 3: # up
links_up += 1
prop_up = links_up / links
if prop_up >= prop:
return
time.sleep(1)
        self.fail('not enough links up: %u / %u' % (links_up, links))
def verify_one_stack_down(self, stack_offset_port, coldstart=False):
self.retry_net_ping()
stack_port = self.non_host_links(self.dpid)[stack_offset_port].port
remote_stack_port = self.non_host_links(self.dpid)[stack_offset_port].peer_port
self.set_port_down(stack_port, wait=False)
# self.dpids[1] is the intermediate switch.
self.set_port_down(remote_stack_port, self.dpids[1], wait=False)
# test case where one link is down when coldstarted.
if coldstart:
self.coldstart_conf()
self.verify_stack_up(prop=0.75)
self.verify_stack_hosts(verify_bridge_local_rule=False)
# Broadcast works, and first switch doesn't see broadcast packet ins from stack.
packet_in_before_broadcast = self.scrape_prometheus_var('of_vlan_packet_ins')
self.verify_broadcast()
packet_in_after_broadcast = self.scrape_prometheus_var('of_vlan_packet_ins')
self.assertEqual(
packet_in_before_broadcast,
packet_in_after_broadcast)
# TODO: re-enable.
# self.verify_no_cable_errors()
def verify_no_arp_storm(self, ping_host, tcpdump_host):
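        """Verify an ARP request does not storm: copies seen are bounded by twice the switch-to-switch link count."""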
num_arp_expected = self.topo.switch_to_switch_links * 2
tcpdump_filter = 'arp and ether src %s' % ping_host.MAC()
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: ping_host.cmd('arp -d %s' % tcpdump_host.IP()),
lambda: ping_host.cmd('ping -c1 %s' % tcpdump_host.IP())],
packets=(num_arp_expected+1))
num_arp_received = len(re.findall(
'who-has %s tell %s' % (tcpdump_host.IP(), ping_host.IP()), tcpdump_txt))
self.assertTrue(num_arp_received)
self.assertLessEqual(num_arp_received, num_arp_expected)
def verify_stack_has_no_loop(self):
for ping_host, tcpdump_host in (
(self.hosts_name_ordered()[0], self.hosts_name_ordered()[-1]),
(self.hosts_name_ordered()[-1], self.hosts_name_ordered()[0])):
self.verify_no_arp_storm(ping_host, tcpdump_host)
def verify_all_stack_hosts(self):
for _ in range(2):
self.verify_stack_up()
self.verify_no_cable_errors()
self.verify_stack_hosts()
self.verify_traveling_dhcp_mac()
self.verify_unicast_not_looped()
self.verify_no_bcast_to_self()
self.verify_stack_has_no_loop()
self.flap_all_switch_ports()
def verify_tunnel_established(self, src_host, dst_host, other_host, packets=3):
"""Verify ICMP packets tunnelled from src to dst."""
icmp_match = {'eth_type': IPV4_ETH, 'ip_proto': 1}
self.wait_until_matching_flow(icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)
tcpdump_text = self.tcpdump_helper(
dst_host, 'icmp[icmptype] == 8', [
# need to set static ARP as only ICMP is tunnelled.
lambda: src_host.cmd('arp -s %s %s' % (other_host.IP(), other_host.MAC())),
lambda: src_host.cmd('ping -c%u -t1 %s' % (packets, other_host.IP()))
],
packets=1, timeout=(packets + 1),
)
self.wait_nonzero_packet_count_flow(
icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)
self.assertTrue(re.search(
'%s: ICMP echo request' % other_host.IP(), tcpdump_text
), 'Tunnel was not established')
def map_int_ext_hosts(self):
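        """Partition hosts by loop_protect_external; return (int_hosts, ext_hosts, dp_hosts) with dp_hosts keyed by DP name."""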
conf = self._get_faucet_conf()
host_name_map = {host.name: host for host in self.hosts_name_ordered()}
int_hosts = set()
ext_hosts = set()
dp_hosts = {}
for dp_name, dp_conf in conf['dps'].items():
dpid = int(dp_conf['dp_id'])
dp_int_hosts = set()
dp_ext_hosts = set()
for p, p_conf in dp_conf['interfaces'].items():
if 'stack' in p_conf:
continue
host = host_name_map[self.net.topo.dpid_port_host[dpid][p]]
if p_conf.get('loop_protect_external', False):
dp_ext_hosts.add(host)
else:
dp_int_hosts.add(host)
dp_hosts[dp_name] = (dp_int_hosts, dp_ext_hosts)
int_hosts.update(dp_int_hosts)
ext_hosts.update(dp_ext_hosts)
return int_hosts, ext_hosts, dp_hosts
class FaucetSingleUntaggedIPV4RoutingWithStackingTest(FaucetStringOfDPTest):
"""IPV4 intervlan routing with stacking test"""
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
SWITCH_TO_SWITCH_LINKS = 1
NUM_DPS = 4
V100 = 100
V200 = 200
V300 = 300
V100_NUM_HOSTS = 1
V200_NUM_HOSTS = 1
V300_NUM_HOSTS = 0
FAUCET_MAC2 = '0e:00:00:00:00:02'
@staticmethod
def get_dp_options():
return {
'drop_spoofed_faucet_mac': False,
'arp_neighbor_timeout': 2,
'max_resolve_backoff_time': 1,
'proactive_learn_v4': True
}
def setUp(self):
pass
def set_up(self):
super(FaucetSingleUntaggedIPV4RoutingWithStackingTest, self).setUp()
router_info = {
self.V100: {
'faucet_mac': self.FAUCET_MAC,
'faucet_vips': [self.get_faucet_vip(1)]
},
self.V200: {
'faucet_mac': self.FAUCET_MAC2,
'faucet_vips': [self.get_faucet_vip(2)]
}
}
untagged_hosts = {self.V100: self.V100_NUM_HOSTS,
self.V200: self.V200_NUM_HOSTS,
self.V300: self.V300_NUM_HOSTS}
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts=untagged_hosts,
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
router=router_info,
dp_options=self.get_dp_options()
)
self.start_net()
def get_faucet_mac(self, vindex):
"""Get the faucet MAC"""
return '0e:00:00:00:00:0%u' % vindex
def get_faucet_vip(self, vindex):
"""Get the IPV4 faucet vip"""
return '10.%u00.0.254/%u' % (vindex, self.NETPREFIX)
def get_ip(self, host_n, vindex):
"""Get the IPV4 host ip"""
return '10.%u00.0.%u/%u' % (vindex, host_n, self.NETPREFIX)
def host_ping(self, src_host, dst_ip):
"""ping host"""
self.one_ipv4_ping(src_host, dst_ip, require_host_learned=False)
def set_host_ip(self, host, ip):
"""Set the host ip"""
host.setIP(str(ip.ip), prefixLen=self.NETPREFIX)
def verify_intervlan_routing(self):
"""Setup host routes and verify intervlan routing is possible"""
num_hosts = self.V100_NUM_HOSTS + self.V200_NUM_HOSTS + self.V300_NUM_HOSTS
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_hosts = [(self.hosts_name_ordered()[i], ipaddress.ip_interface(
self.get_ip(i+1, 1))) for i in range(len(self.hosts_name_ordered())) if (i % num_hosts) == 0]
v200_hosts = [(self.hosts_name_ordered()[i], ipaddress.ip_interface(
self.get_ip(i+1, 2))) for i in range(len(self.hosts_name_ordered())) if (i % num_hosts) == 1]
for host_tuple in v100_hosts:
host, host_ip = host_tuple
self.set_host_ip(host, host_ip)
for host_tuple in v200_hosts:
host, host_ip = host_tuple
self.set_host_ip(host, host_ip)
for v100_host_tuple in v100_hosts:
v100_host, v100_host_ip = v100_host_tuple
for v200_host_tuple in v200_hosts:
v200_host, v200_host_ip = v200_host_tuple
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
for src_host_tuple in v100_hosts:
src_host, _ = src_host_tuple
for dst_host_tuple in v100_hosts:
_, dst_ip = dst_host_tuple
if src_host_tuple == dst_host_tuple:
continue
self.host_ping(src_host, dst_ip.ip)
for src_host_tuple in v200_hosts:
src_host, _ = src_host_tuple
for dst_host_tuple in v200_hosts:
_, dst_ip = dst_host_tuple
if src_host_tuple == dst_host_tuple:
continue
self.host_ping(src_host, dst_ip.ip)
def test_intervlan_routing_stack_of_2_dp(self):
"""Verify intervlan routing works with 2 DPs in a stack"""
self.NUM_DPS = 2
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_intervlan_routing_stack_of_3_dp(self):
"""Verify intervlan routing works with 3 DPs in a stack"""
self.NUM_DPS = 3
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_intervlan_routing_stack_of_4_dp(self):
"""Verify intervlan routing works with 4 DPs in a stack"""
self.NUM_DPS = 4
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_path_no_vlans(self):
"""Test when a DP in the path of a intervlan route contains no routed VLANs"""
self.NUM_DPS = 3
self.set_up()
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_host = self.hosts_name_ordered()[0]
v100_host_ip = ipaddress.ip_interface(self.get_ip(1, 1))
v200_host = self.hosts_name_ordered()[5]
v200_host_ip = ipaddress.ip_interface(self.get_ip(2, 2))
        # Remove all hosts on the middle DP by changing them to hosts on VLAN 300;
        # the middle DP then contains no hosts on VLAN 100 or VLAN 200.
conf = self._get_faucet_conf()
interface_config = conf['dps']['faucet-2']['interfaces']
for port_key, port_dict in interface_config.items():
if 'stack' in port_dict:
continue
conf['dps']['faucet-2']['interfaces'][port_key]['native_vlan'] = self.V300
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
self.verify_stack_up()
self.set_host_ip(v100_host, v100_host_ip)
self.set_host_ip(v200_host, v200_host_ip)
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
def test_dp_contains_only_one_vlan_from_router(self):
"""Test when a DP has only one of the routed VLANs"""
self.NUM_DPS = 2
self.set_up()
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_host = self.hosts_name_ordered()[0]
v100_host_ip = ipaddress.ip_interface(self.get_ip(1, 1))
v200_host = self.hosts_name_ordered()[3]
v200_host_ip = ipaddress.ip_interface(self.get_ip(2, 2))
        # Remove the host on VLAN 100 by changing it to a host on VLAN 300; there is
        # now only one host on the DP that is on a routed VLAN (200).
conf = self._get_faucet_conf()
interface_config = conf['dps']['faucet-2']['interfaces']
for port_key, port_dict in interface_config.items():
if 'stack' in port_dict:
continue
if port_dict['native_vlan'] == self.V100:
conf['dps']['faucet-2']['interfaces'][port_key]['native_vlan'] = self.V300
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
self.verify_stack_up()
self.set_host_ip(v100_host, v100_host_ip)
self.set_host_ip(v200_host, v200_host_ip)
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
class FaucetSingleUntaggedIPV6RoutingWithStackingTest(FaucetSingleUntaggedIPV4RoutingWithStackingTest):
"""IPV6 intervlan routing with stacking tests"""
IPV = 6
NETPREFIX = 64
ETH_TYPE = IPV6_ETH
def get_dp_options(self):
return {
'drop_spoofed_faucet_mac': False,
'nd_neighbor_timeout': 2,
'max_resolve_backoff_time': 1,
'proactive_learn_v6': True
}
def host_ping(self, src_host, dst_ip):
""" """
self.one_ipv6_ping(src_host, dst_ip, require_host_learned=False)
def set_host_ip(self, host, ip):
""" """
self.add_host_ipv6_address(host, ip)
def get_faucet_vip(self, vindex):
"""Get the IPV6 faucet vip"""
return 'fc0%u::1:254/112' % vindex
def get_ip(self, host_n, vindex):
"""Get the IPV6 host ip"""
return 'fc0%u::1:%u/64' % (vindex, host_n)
class FaucetStringOfDPUntaggedTest(FaucetStringOfDPTest):
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPUntaggedTest, self).setUp()
self.build_net(
n_dps=self.NUM_DPS, untagged_hosts={self.VID: self.NUM_HOSTS})
self.start_net()
def test_untagged(self):
"""All untagged hosts in multi switch topology can reach one another."""
self.verify_stack_hosts()
self.verify_traveling_dhcp_mac()
class FaucetStringOfDPTaggedTest(FaucetStringOfDPTest):
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPTaggedTest, self).setUp()
self.build_net(
n_dps=self.NUM_DPS, n_tagged=self.NUM_HOSTS, tagged_vid=self.VID)
self.start_net()
def test_tagged(self):
"""All tagged hosts in multi switch topology can reach one another."""
self.verify_stack_hosts(verify_bridge_local_rule=False)
self.verify_traveling_dhcp_mac()
class FaucetSingleStackStringOfDPTagged0Test(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with tagged hosts."""
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPTagged0Test, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
n_tagged=self.NUM_HOSTS,
tagged_vid=self.VID,
switch_to_switch_links=2)
self.start_net()
def test_tagged(self):
"""All tagged hosts in stack topology can reach each other."""
self.verify_stack_up()
for coldstart in (False, True):
self.verify_one_stack_down(0, coldstart)
class FaucetSingleStackStringOfDPTagged1Test(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with tagged hosts."""
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPTagged1Test, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
n_tagged=self.NUM_HOSTS,
tagged_vid=self.VID,
switch_to_switch_links=2)
self.start_net()
def test_tagged(self):
self.verify_stack_up()
for coldstart in (False, True):
self.verify_one_stack_down(1, coldstart)
class FaucetStringOfDPLACPUntaggedTest(FaucetStringOfDPTest):
"""Test topology of LACP-connected datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 2
match_bcast = {'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'}
action_str = 'OUTPUT:%u'
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPLACPUntaggedTest, self).setUp()
self.build_net(
stack=False,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
lacp=True)
self.start_net()
def wait_for_lacp_status(self, port_no, wanted_status, dpid, dp_name, timeout=30):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
if not self.wait_for_prometheus_var(
'port_lacp_status', wanted_status,
labels=labels, dpid=False, timeout=timeout):
self.fail('wanted LACP status for %s to be %u' % (labels, wanted_status))
def wait_for_lacp_port_down(self, port_no, dpid, dp_name):
self.wait_for_lacp_status(port_no, 0, dpid, dp_name)
def wait_for_lacp_port_up(self, port_no, dpid, dp_name):
self.wait_for_lacp_status(port_no, 1, dpid, dp_name)
# We sort non_host_links by port because FAUCET sorts its ports
# and only floods out of the first active LACP port in that list
def wait_for_all_lacp_up(self):
first_link, second_link = sorted(self.non_host_links(self.dpid))
first_lacp_port, second_lacp_port = first_link.port, second_link.port
remote_first_lacp_port = first_link.peer_port
self.wait_for_lacp_port_up(first_lacp_port, self.dpid, self.DP_NAME)
self.wait_for_lacp_port_up(second_lacp_port, self.dpid, self.DP_NAME)
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % first_lacp_port])
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % remote_first_lacp_port],
dpid=self.dpids[1])
def test_lacp_port_down(self):
"""LACP to switch to a working port when the primary port fails."""
first_link, second_link = sorted(self.non_host_links(self.dpid))
first_lacp_port, second_lacp_port = first_link.port, second_link.port
remote_first_lacp_port, remote_second_lacp_port = first_link.peer_port, second_link.peer_port
self.wait_for_all_lacp_up()
self.retry_net_ping()
self.set_port_down(first_lacp_port, wait=False)
self.wait_for_lacp_port_down(first_lacp_port, self.dpid, self.DP_NAME)
self.wait_for_lacp_port_down(remote_first_lacp_port, self.dpids[1], 'faucet-2')
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % second_lacp_port])
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[
self.action_str % remote_second_lacp_port],
dpid=self.dpids[1])
self.retry_net_ping()
self.set_port_up(first_lacp_port)
def test_untagged(self):
"""All untagged hosts in stack topology can reach each other."""
for _ in range(3):
self.wait_for_all_lacp_up()
self.verify_stack_hosts()
self.flap_all_switch_ports()
def test_dyn_fail(self):
"""Test lacp fail on reload with dynamic lacp status."""
conf = self._get_faucet_conf()
first_link, second_link = sorted(self.non_host_links(self.dpid))
src_port, dst_port = first_link.port, second_link.port
fail_port = first_link.peer_port
self.wait_for_lacp_port_up(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
conf['dps']['faucet-2']['interfaces'][fail_port]['lacp'] = 0
conf['dps']['faucet-2']['interfaces'][fail_port]['lacp_active'] = False
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_lacp_port_down(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
def test_passthrough(self):
"""Test lacp passthrough on port fail."""
conf = self._get_faucet_conf()
first_link, second_link = self.non_host_links(self.dpid)
src_port, dst_port = first_link.port, second_link.port
fail_port, end_port = first_link.peer_port, second_link.peer_port
conf['dps']['faucet-1']['interfaces'][dst_port]['lacp_passthrough'] = [src_port]
conf['dps']['faucet-1']['interfaces'][dst_port]['loop_protect_external'] = True
conf['dps']['faucet-1']['interfaces'][dst_port]['lacp'] = 2
conf['dps']['faucet-1']['interfaces'][src_port]['loop_protect_external'] = True
conf['dps']['faucet-2']['interfaces'][fail_port]['loop_protect_external'] = True
conf['dps']['faucet-2']['interfaces'][end_port]['loop_protect_external'] = True
conf['dps']['faucet-2']['interfaces'][end_port]['lacp'] = 2
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_all_lacp_up()
self.verify_stack_hosts()
conf['dps']['faucet-2']['interfaces'][fail_port]['lacp'] = 0
conf['dps']['faucet-2']['interfaces'][fail_port]['lacp_active'] = False
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_lacp_port_down(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_down(end_port, self.dpids[1], 'faucet-2')
class FaucetStackStringOfDPUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 2
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackStringOfDPUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""All untagged hosts in stack topology can reach each other."""
self.verify_stack_hosts()
class FaucetStackStringOfDPExtLoopProtUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackStringOfDPExtLoopProtUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
use_external=True)
self.start_net()
def test_untagged(self):
"""Host can reach each other, unless both marked loop_protect_external"""
for host in self.hosts_name_ordered():
self.require_host_learned(host)
# Part 1: Make sure things are connected properly.
self.verify_protected_connectivity() # Before reload
# Part 2: Test the code on pipeline reconfiguration path.
self._mark_external(True)
self._mark_external(False)
# Part 3: Make sure things are the same after reload.
self.verify_protected_connectivity() # After reload
def _mark_external(self, protect_external):
conf = self._get_faucet_conf()
loop_port = self.non_host_links(self.dpids[1])[0].port
conf['dps']['faucet-2']['interfaces'][loop_port]['loop_protect_external'] = protect_external
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
def verify_protected_connectivity(self):
self.verify_stack_up()
int_hosts, ext_hosts, dp_hosts = self.map_int_ext_hosts()
for int_host in int_hosts:
# All internal hosts can reach other internal hosts.
for other_int_host in int_hosts - {int_host}:
self.verify_broadcast(hosts=(int_host, other_int_host), broadcast_expected=True)
self.verify_unicast(hosts=(int_host, other_int_host), unicast_expected=True)
for ext_host in ext_hosts:
            # External hosts cannot flood to each other.
for other_ext_host in ext_hosts - {ext_host}:
self.verify_broadcast(hosts=(ext_host, other_ext_host), broadcast_expected=False)
for local_int_hosts, local_ext_hosts in dp_hosts.values():
local_int_host = list(local_int_hosts)[0]
remote_ext_hosts = ext_hosts - local_ext_hosts
            # External hosts on a remote switch should not get traffic flooded from an
            # internal host on the local switch, because that traffic was already
            # flooded to an external host on the local switch.
for remote_ext_host in remote_ext_hosts:
self.verify_broadcast(hosts=(local_int_host, remote_ext_host), broadcast_expected=False)
class FaucetSingleStackStringOf3DPExtLoopProtUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 3
NUM_HOSTS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOf3DPExtLoopProtUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
use_external=True)
self.start_net()
def test_untagged(self):
self.verify_stack_up()
int_hosts, ext_hosts, dp_hosts = self.map_int_ext_hosts()
_, root_ext_hosts = dp_hosts[self.DP_NAME]
for int_host in int_hosts:
# All internal hosts can reach other internal hosts.
for other_int_host in int_hosts - {int_host}:
# force ARP to test broadcast.
int_host.cmd(
'arp -d %s' % other_int_host.IP())
self.one_ipv4_ping(
int_host, other_int_host.IP(), require_host_learned=False)
for ext_host in ext_hosts:
            # External hosts cannot flood to each other.
for other_ext_host in ext_hosts - {ext_host}:
self.verify_broadcast(
hosts=(ext_host, other_ext_host), broadcast_expected=False)
remote_ext_hosts = ext_hosts - set(root_ext_hosts)
        # A broadcast from an internal host should never reach an external host that is not on the root.
for local_int_hosts, _ in dp_hosts.values():
local_int_host = list(local_int_hosts)[0]
for remote_ext_host in remote_ext_hosts:
self.verify_broadcast(
hosts=(local_int_host, remote_ext_host), broadcast_expected=False)
class FaucetGroupStackStringOfDPUntaggedTest(FaucetStackStringOfDPUntaggedTest):
"""Test topology of stacked datapaths with untagged hosts."""
GROUP_TABLE = True
class FaucetStackRingOfDPTest(FaucetStringOfDPTest):
NUM_DPS = 3
NUM_HOSTS = 2
SOFTWARE_ONLY = True
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackRingOfDPTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
stack_ring=True)
self.start_net()
self.first_host = self.hosts_name_ordered()[0]
self.second_host = self.hosts_name_ordered()[1]
self.fifth_host = self.hosts_name_ordered()[4]
self.last_host = self.hosts_name_ordered()[self.NUM_HOSTS * self.NUM_DPS - 1]
def one_stack_port_down(self):
port = self.non_host_links(self.dpid)[1].port
self.set_port_down(port, self.dpid)
self.wait_for_stack_port_status(self.dpid, self.DP_NAME, port, 2) # down
def test_untagged(self):
"""Stack loop prevention works and hosts can ping each others."""
self.verify_stack_up()
self.verify_stack_has_no_loop()
self.retry_net_ping()
self.verify_traveling_dhcp_mac()
def test_stack_down(self):
"""Verify if a link down is reflected on stack-topology."""
self.verify_stack_up()
# ping first pair
self.retry_net_ping([self.first_host, self.last_host])
self.one_stack_port_down()
# ping fails for now because failures are not handled yet
self.retry_net_ping([self.first_host, self.last_host], required_loss=100, retries=1)
# newly learned hosts should work
self.retry_net_ping([self.second_host, self.fifth_host])
class FaucetSingleStackAclControlTest(FaucetStringOfDPTest):
"""Test ACL control of stacked datapaths with untagged hosts."""
NUM_DPS = 3
NUM_HOSTS = 3
def acls(self):
map1, map2, map3 = [self.port_maps[dpid] for dpid in self.dpids]
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'nw_dst': '10.0.0.2',
'actions': {
'output': {
'port': map1['port_2']
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': {
'output': {
'ports': [
map1['port_2'],
map1['port_4']]
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'output': {
'port': map1['port_4']
}
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
2: [
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'output': {
'port': map2['port_5']
}
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
3: [
{'rule': {
'dl_type': IPV4_ETH,
'nw_dst': '10.0.0.7',
'actions': {
'output': {
'port': map3['port_1']
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': {
'output': {
'ports': [map3['port_1']]
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'allow': 0,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
map1, map2, map3 = [self.port_maps[dpid] for dpid in self.dpids]
return {
'faucet-1': {
# Port 1, acl_in = 1
map1['port_1']: 1,
},
'faucet-2': {
# Port 4, acl_in = 2
map2['port_4']: 2,
},
'faucet-3': {
# Port 4, acl_in = 3
map3['port_4']: 3,
},
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackAclControlTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
)
self.start_net()
def test_unicast(self):
"""Hosts in stack topology can appropriately reach each other over unicast."""
hosts = self.hosts_name_ordered()
self.verify_stack_up()
self.verify_tp_dst_notblocked(5000, hosts[0], hosts[1], table_id=None)
self.verify_tp_dst_blocked(5000, hosts[0], hosts[3], table_id=None)
self.verify_tp_dst_notblocked(5000, hosts[0], hosts[6], table_id=None)
self.verify_tp_dst_blocked(5000, hosts[0], hosts[7], table_id=None)
self.verify_no_cable_errors()
def test_broadcast(self):
"""Hosts in stack topology can appropriately reach each other over broadcast."""
hosts = self.hosts_name_ordered()
self.verify_stack_up()
self.verify_bcast_dst_notblocked(5000, hosts[0], hosts[1])
self.verify_bcast_dst_blocked(5000, hosts[0], hosts[3])
self.verify_bcast_dst_notblocked(5000, hosts[0], hosts[6])
self.verify_bcast_dst_blocked(5000, hosts[0], hosts[7])
self.verify_no_cable_errors()
class FaucetStringOfDPACLOverrideTest(FaucetStringOfDPTest):
NUM_DPS = 1
NUM_HOSTS = 2
# ACL rules which will get overridden.
def acls(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 1,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 0,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# ACL rules which get put into an include-optional
# file, then reloaded into FAUCET.
def acls_override(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 0,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 1,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port, acl_in = 1
port_1: 1,
},
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPACLOverrideTest, self).setUp()
self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
missing_config = os.path.join(self.tmpdir, 'missing_config.yaml')
self.build_net(
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
include_optional=[self.acls_config, missing_config],
)
self.start_net()
def test_port5001_blocked(self):
"""Test that TCP port 5001 is blocked."""
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5001, first_host, second_host)
with open(self.acls_config, 'w') as config_file:
config_file.write(self.get_config(acls=self.acls_override()))
self.verify_faucet_reconf(cold_start=False, change_expected=True)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_no_cable_errors()
def test_port5002_notblocked(self):
"""Test that TCP port 5002 is not blocked."""
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5002, first_host, second_host)
with open(self.acls_config, 'w') as config_file:
config_file.write(self.get_config(acls=self.acls_override()))
self.verify_faucet_reconf(cold_start=False, change_expected=True)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_no_cable_errors()
class FaucetTunnelSameDpTest(FaucetStringOfDPTest):
NUM_DPS = 2
NUM_HOSTS = 2
SWITCH_TO_SWITCH_LINKS = 2
VID = 100
def acls(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 1,
'actions': {
'allow': 0,
'output': {
'tunnel': {
'type': 'vlan',
'tunnel_id': 200,
'dp': 'faucet-1',
'port': 'b%(port_2)d'}
}
}
}}
]
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
                # First port, acl_in = 1
port_1: 1,
}
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetTunnelSameDpTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
)
self.start_net()
def test_tunnel_established(self):
"""Test a tunnel path can be created."""
self.verify_stack_up()
src_host, dst_host, other_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host)
class FaucetTunnelTest(FaucetStringOfDPTest):
NUM_DPS = 2
NUM_HOSTS = 2
SWITCH_TO_SWITCH_LINKS = 2
VID = 100
def acls(self):
dpid2 = self.dpids[1]
port2_1 = self.port_maps[dpid2]['port_1']
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 1,
'actions': {
'allow': 0,
'output': {
'tunnel': {
'type': 'vlan',
'tunnel_id': 200,
'dp': 'faucet-2',
'port': port2_1}
}
}
}}
]
}
# DP-to-acl_in port mapping.
    def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
                # First port, acl_in = 1
port_1: 1,
}
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetTunnelTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
)
self.start_net()
def one_stack_port_down(self, stack_port):
self.set_port_down(stack_port, self.dpid)
self.wait_for_stack_port_status(self.dpid, self.DP_NAME, stack_port, 2)
def test_tunnel_established(self):
"""Test a tunnel path can be created."""
self.verify_stack_up()
src_host, other_host, dst_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host)
def test_tunnel_path_rerouted(self):
"""Test a tunnel path is rerouted when a stack is down."""
self.verify_stack_up()
first_stack_port = self.non_host_links(self.dpid)[0].port
self.one_stack_port_down(first_stack_port)
src_host, other_host, dst_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host, packets=10)
self.set_port_up(first_stack_port, self.dpid)
class FaucetGroupTableTest(FaucetUntaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_BOILER_UNTAGGED
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_TAGGED_BOILER
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
actions:
allow: 1
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:0d:00:00:00:99')
self.retry_net_ping(hosts=(first_host, second_host))
self.wait_nonzero_packet_count_flow(
{'dl_src': '0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
table_id=self._PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
def override_mac():
return "0e:00:00:00:00:02"
OVERRIDE_MAC = override_mac()
def rewrite_mac():
return "0e:00:00:00:00:03"
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten mac address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd('ping -c1 -t1 %s' % second_host.IP())],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd('ping -c1 %s' % overridden_host.IP())
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
        # A ping from h1 to h2's MAC should appear at the third host, and not the
        # second host, as the ACL should rewrite the destination MAC.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 1
use_idle_timeout: True
""" + CONFIG_BOILER_UNTAGGED
def wait_for_host_removed(self, host, in_port, timeout=5):
for _ in range(timeout):
if not self.host_learned(host, in_port=in_port, timeout=1):
return
self.fail('host %s still learned' % host)
def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
pattern = "OFPFlowRemoved"
mac = None
if src_mac:
pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
mac = src_mac
if dst_mac:
pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
mac = dst_mac
for _ in range(timeout):
for _, debug_log_name in self._get_ofchannel_logs():
with open(debug_log_name) as debug_log:
debug = debug_log.read()
if re.search(pattern, debug):
return
time.sleep(1)
self.fail('Not received OFPFlowRemoved for host %s' % mac)
def wait_for_host_log_msg(self, host_mac, msg):
log_file = self.env['faucet']['FAUCET_LOG']
host_log_re = r'.*%s %s.*' % (msg, host_mac)
self.wait_until_matching_lines_from_file(host_log_re, log_file)
def test_untagged(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[:2]
self.swap_host_macs(first_host, second_host)
for host, port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
def test_untagged(self):
"""Host that is actively sending should have its dst rule renewed as the
rule expires. Host that is not sending expires as usual.
"""
self.ping_all_when_learned()
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()
self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24'))
first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
for host in (second_host, third_host, fourth_host):
self.host_drop_all_ips(host)
self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
self.assertTrue(self.host_learned(
first_host, in_port=int(self.port_map['port_1'])))
for host, port in (
(second_host, self.port_map['port_2']),
(third_host, self.port_map['port_3']),
(fourth_host, self.port_map['port_4'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.wait_for_host_log_msg(host.MAC(), 'expiring host')
self.wait_for_host_removed(host, in_port=int(port))
class FaucetDisconnectTest(FaucetUntaggedTest):
"""Test that switch works properly after repeated disconnections
caused by DPID mismatch"""
def update_config(self, dpid):
"""Update config with good/bad DPID"""
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_untagged(self):
"""Run untagged test after disconnects and config update"""
# We update the config with a bad DPID and then wait for
# 'unknown datapath' messages, indicating switch connections that
# FAUCET has rejected. The switch should see them as
# 'connection reset by peer'.
mask = int(16*'f', 16)
bad_dpid = (int(self.dpid) + 0xdeadbeef) & mask
faucet_log = self.env['faucet']['FAUCET_LOG']
self.update_config(dpid=bad_dpid)
self.wait_until_matching_lines_from_file(
r'.*ERROR.*unknown datapath', faucet_log, timeout=60, count=4)
self.update_config(dpid=self.dpid)
super().test_untagged()
class FaucetBadFlowModTest(FaucetUntaggedTest):
"""Test that switch and FAUCET still work after we send some bad flow_mods"""
def base_flow_mod(self):
"""Return a base flow mod that we mess with"""
return {'dpid': self.dpid,
'cookie': 0,
'cookie_mask': 0,
'table_id': 0,
'idle_timeout': 29,
'hard_timeout': 91,
'flags': 1,
'priority': 1,
'match': {'in_port': 1},
'actions': [{
'type': 'OUTPUT',
'port': 2}]}
# For now, the flow_mods are reasonably well-formed but with
# parameters that are incorrect for the switch and for FAUCET
def bad_dpid(self):
"""Return a random, bad dpid parameter"""
mask = int(16*'f', 16)
dpid = (int(self.dpid) + random.randint(0, 1<<63)) & mask
return {'dpid': dpid}
@staticmethod
def bad_table():
"""Return a bad table ID parameter"""
# This should be higher than FAUCET's max table ID
bad_table_start = 32
return {'table_id': random.randint(bad_table_start, 100)}
def bad_port(self):
"""Return a (hopefully very) bad port number"""
max_port = max(self.port_map.values())
offset = random.randint(0x1000, 0xE0000000)
mask = 0xEFFFFFFF
return (max_port + offset) & mask
def bad_match(self):
"""Return a bad match field"""
matches = (
# Bad input port
{'in_port': self.bad_port()},
# IPv4 (broadcast) src with bad ('reserved') ethertype
{'nw_src': '255.255.255.255', 'dl_type': 0xFFFF},
# IPv4 with IPv6 ethertype:
{'nw_src': '1.2.3.4', 'dl_type': 0x86DD},
# IPv4 address as IPv6 dst
{'ipv6_dst': '1.2.3.4', 'dl_type': 0x86DD},
# IPv6 dst with Bad/reserved ip_proto
{'ipv6_dst': '2001::aaaa:bbbb:cccc:1111', 'ip_proto': 255},
# Destination port but no transport protocol
{'tp_dst': 80},
            # ARP opcode on non-ARP packet
{'arp_op': 0x3, 'dl_type': 0x1234})
match = random.sample(matches, 1)[0]
return {'match': match}
def bad_actions(self, count=1):
"""Return a questionable actions parameter"""
actions = (
{'type': 'OUTPUT', 'port': self.bad_port()},
{'type': 'PUSH_MPLS', 'ethertype': 0x8BAD},
{'type': 'SET_QUEUE', 'queue_id':
random.randint(0x8000, 0xFFFFFFFF)})
return {'actions': random.sample(actions, count)}
# Possible options for bad parameters
bad_options = ('dpid', 'table', 'match', 'actions')
def bad_flow_mod(self):
"""Return a flow mod with some bad parameters"""
flow_mod = self.base_flow_mod()
# Add two or more bad options
options = random.sample(self.bad_options,
random.randint(2, len(self.bad_options)))
for option in options:
param = getattr(self, 'bad_%s' % option)()
flow_mod.update(param)
return flow_mod
def send_flow_mod(self, flow_mod, timeout=5):
"""Send flow_mod to switch via ofctl"""
int_dpid = mininet_test_util.str_int_dpid(self.dpid)
return self._ofctl_post(int_dpid, 'stats/flowentry/modify',
timeout=timeout, params=flow_mod)
def tearDown(self, ignore_oferrors=True):
"""Ignore OF errors on teardown"""
oferrors = super().tearDown(ignore_oferrors)
oferrors = re.findall(r'type: (\w+)', oferrors)
counter = collections.Counter(oferrors)
error('Ignored OF error count: %s\n' % dict(counter))
# TODO: ensure at least one error is always generated.
# pylint: disable=arguments-differ
def test_untagged(self, count=10):
"""Send a bunch of bad flow mods, then verify connectivity"""
for _ in range(count):
flow_mod = self.bad_flow_mod()
error('sending bad flow_mod', flow_mod, '\n')
self.send_flow_mod(flow_mod)
self.ping_all_when_learned()
class FaucetUntaggedMorePortsBase(FaucetUntaggedTest):
"""Base class for untagged test with more ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 16 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 180 # Timeout for event logger process
# Config lines for additional ports
CONFIG_EXTRA_PORT = """
{port}:
native_vlan: 100""" + "\n"
def _init_faucet_config(self): # pylint: disable=invalid-name
"""Extend config with more ports if needed"""
self.assertTrue(self.CONFIG.endswith(CONFIG_BOILER_UNTAGGED))
# We know how to extend the config for more ports
base_port_count = len(re.findall('port', CONFIG_BOILER_UNTAGGED))
ports = self.topo.dpid_ports(self.dpid)
for port in ports[base_port_count:]:
self.CONFIG += self.CONFIG_EXTRA_PORT.format(port=port)
super()._init_faucet_config()
def setUp(self):
"""Make sure N_UNTAGGED doesn't exceed hw port count"""
if self.config and self.config.get('hw_switch', False):
self.N_UNTAGGED = min(len(self.config['dp_ports']),
self.N_UNTAGGED)
error('(%d ports) ' % self.N_UNTAGGED)
super().setUp()
class FaucetUntagged32PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 32 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 32 # Maximum number of ports to test
@unittest.skip('slow and potentially unreliable on travis')
class FaucetUntagged48PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 48 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 48 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 360 # Timeout for event logger process
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import csv
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
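# Decimal places used when formatting each fiat currency; currencies not
# listed default to 2 (see FxThread.ccy_amount_str). For example, JPY
# amounts are shown with no decimals and BHD with three.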
class ExchangeBase(PrintError):
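    """Base class for fiat exchange rate backends.

    Subclasses implement get_rates() and, optionally, history_ccys() and
    historical_rates(); quotes and history are fetched on daemon threads
    and reported through the on_quotes/on_history callbacks.
    """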
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
        # APIs must be served over HTTPS
url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent': 'Electron Cash'})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron-Cash'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
t.setDaemon(True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def historical_rates(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/BTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def historical_rates(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class Bitvalor(ExchangeBase):
    def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('coinbase.com',
'/api/v1/currencies/exchange_rates')
return dict([(r[7:].upper(), Decimal(json[r]))
for r in json if r.startswith('btc_to_')])
class CoinDesk(ExchangeBase):
def get_rates(self, ccy):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
ccys = [d['currency'] for d in dicts]
result = dict.fromkeys(ccys)
result[ccy] = Decimal(json['bpi'][ccy]['rate_float'])
return result
def history_starts(self):
return { 'USD': '2012-11-30' }
def history_ccys(self):
return self.history_starts().keys()
def historical_rates(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
        return {'INR': Decimal(json['lastprice'] / 100.0)}
class Foxbit(ExchangeBase):
    def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD']
pairs = ['Nano%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class CoinFloor(ExchangeBase):
# CoinFloor API only supports GBP on public API
def get_rates(self, ccy):
json = self.get_json('webapi.coinfloor.co.uk:8090/bist/BTN/GBP', '/ticker/')
return {'GBP': Decimal(json['last'])}
class CEXIO(ExchangeBase):
# Cex.io supports GBP, USD, EUR, BTC
def get_rates(self, ccy):
json = self.get_json('cex.io', '/api/ticker/BTN/%s' % ccy)
return { ccy : Decimal(json['last'])}
class BtcMarkets(ExchangeBase):
# BtcMarkets - Australian Exchange - AUD
def get_rates(self, ccy):
json = self.get_json('api.btcmarkets.net', '/market/BTN/%s/tick' % ccy)
return { ccy : Decimal(json['lastPrice'])}
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class NegocieCoins(ExchangeBase):
    def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
def history_ccys(self):
return ['BRL']
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.unocoin.com', 'trade?buy')
return {'INR': Decimal(json)}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/btc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/btc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/btc_usd')
return {'EUR': Decimal(json_eur['btc_eur']['last']),
'RUB': Decimal(json_rub['btc_rur']['last']),
'USD': Decimal(json_usd['btc_usd']['last'])}
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price'] / 100.0)}
def history_ccys(self):
return ['USD']
def historical_rates(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
def dictinvert(d):
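    """Invert a dict of lists, e.g. {'a': [1, 2], 'b': [2]} -> {1: ['a'], 2: ['a', 'b']}."""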
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
def get_exchanges_and_currencies():
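    """Return {exchange name: [supported currencies]}, cached in currencies.json.

    If the cache is missing or unreadable, query every ExchangeBase subclass
    in this module and rewrite the cache file.
    """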
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
        except Exception:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
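    # Example (illustrative): with self.ccy = 'USD' (precision 2),
    # ccy_amount_str(Decimal('1234.5'), commas=True) yields '1,234.50'.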
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'Kraken')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Kraken)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
|
test_execute.py
|
import asyncio
import time
from threading import Thread
import dagster_pandas as dagster_pd
import pytest
from dagster import (
DagsterUnmetExecutorRequirementsError,
InputDefinition,
ModeDefinition,
execute_pipeline,
execute_pipeline_iterator,
file_relative_path,
pipeline,
reconstructable,
seven,
solid,
)
from dagster.core.definitions.executor import default_executors
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.events import DagsterEventType
from dagster.core.test_utils import (
instance_for_test,
instance_for_test_tempdir,
nesting_composite_pipeline,
)
from dagster.utils import send_interrupt
from dagster_dask import DataFrame, dask_executor
from dask.distributed import Scheduler, Worker
@solid
def simple(_):
return 1
@pipeline(mode_defs=[ModeDefinition(executor_defs=default_executors + [dask_executor])])
def dask_engine_pipeline():
simple()
def test_execute_on_dask_local():
with seven.TemporaryDirectory() as tempdir:
with instance_for_test_tempdir(tempdir) as instance:
result = execute_pipeline(
reconstructable(dask_engine_pipeline),
run_config={
"intermediate_storage": {"filesystem": {"config": {"base_dir": tempdir}}},
"execution": {"dask": {"config": {"cluster": {"local": {"timeout": 30}}}}},
},
instance=instance,
)
assert result.result_for_solid("simple").output_value() == 1
def dask_composite_pipeline():
return nesting_composite_pipeline(
6, 2, mode_defs=[ModeDefinition(executor_defs=default_executors + [dask_executor])]
)
def test_composite_execute():
with instance_for_test() as instance:
result = execute_pipeline(
reconstructable(dask_composite_pipeline),
run_config={
"intermediate_storage": {"filesystem": {}},
"execution": {"dask": {"config": {"cluster": {"local": {"timeout": 30}}}}},
},
instance=instance,
)
assert result.success
@solid(input_defs=[InputDefinition("df", dagster_pd.DataFrame)])
def pandas_solid(_, df): # pylint: disable=unused-argument
pass
@pipeline(mode_defs=[ModeDefinition(executor_defs=default_executors + [dask_executor])])
def pandas_pipeline():
pandas_solid()
def test_pandas_dask():
run_config = {
"solids": {
"pandas_solid": {
"inputs": {"df": {"csv": {"path": file_relative_path(__file__, "ex.csv")}}}
}
}
}
with instance_for_test() as instance:
result = execute_pipeline(
ReconstructablePipeline.for_file(__file__, pandas_pipeline.name),
run_config={
"intermediate_storage": {"filesystem": {}},
"execution": {"dask": {"config": {"cluster": {"local": {"timeout": 30}}}}},
**run_config,
},
instance=instance,
)
assert result.success
@solid(input_defs=[InputDefinition("df", DataFrame)])
def dask_solid(_, df): # pylint: disable=unused-argument
pass
@pipeline(mode_defs=[ModeDefinition(executor_defs=default_executors + [dask_executor])])
def dask_pipeline():
dask_solid()
def test_dask():
run_config = {
"solids": {
"dask_solid": {
"inputs": {"df": {"csv": {"path": file_relative_path(__file__, "ex*.csv")}}}
}
}
}
with instance_for_test() as instance:
result = execute_pipeline(
ReconstructablePipeline.for_file(__file__, dask_pipeline.name),
run_config={
"intermediate_storage": {"filesystem": {}},
"execution": {"dask": {"config": {"cluster": {"local": {"timeout": 30}}}}},
**run_config,
},
instance=instance,
)
assert result.success
def test_execute_on_dask_local_with_intermediate_storage():
with seven.TemporaryDirectory() as tempdir:
with instance_for_test_tempdir(tempdir) as instance:
result = execute_pipeline(
reconstructable(dask_engine_pipeline),
run_config={
"intermediate_storage": {"filesystem": {"config": {"base_dir": tempdir}}},
"execution": {"dask": {"config": {"cluster": {"local": {"timeout": 30}}}}},
},
instance=instance,
)
assert result.result_for_solid("simple").output_value() == 1
def test_execute_on_dask_local_with_default_storage():
with pytest.raises(DagsterUnmetExecutorRequirementsError):
with instance_for_test() as instance:
result = execute_pipeline(
reconstructable(dask_engine_pipeline),
run_config={
"execution": {"dask": {"config": {"cluster": {"local": {"timeout": 30}}}}},
},
instance=instance,
)
assert result.result_for_solid("simple").output_value() == 1
@solid(input_defs=[InputDefinition("df", DataFrame)])
def sleepy_dask_solid(_, df): # pylint: disable=unused-argument
start_time = time.time()
while True:
time.sleep(0.1)
if time.time() - start_time > 120:
raise Exception("Timed out")
@pipeline(mode_defs=[ModeDefinition(executor_defs=default_executors + [dask_executor])])
def sleepy_dask_pipeline():
sleepy_dask_solid()
def test_dask_terminate():
run_config = {
"solids": {
"sleepy_dask_solid": {
"inputs": {"df": {"csv": {"path": file_relative_path(__file__, "ex*.csv")}}}
}
}
}
interrupt_thread = None
result_types = []
with instance_for_test() as instance:
try:
for result in execute_pipeline_iterator(
pipeline=ReconstructablePipeline.for_file(__file__, sleepy_dask_pipeline.name),
run_config=run_config,
instance=instance,
):
# Interrupt once the first step starts
if result.event_type == DagsterEventType.STEP_START and not interrupt_thread:
interrupt_thread = Thread(target=send_interrupt, args=())
interrupt_thread.start()
if result.event_type == DagsterEventType.STEP_FAILURE:
assert "KeyboardInterrupt" in result.event_specific_data.error.message
result_types.append(result.event_type)
assert False
except KeyboardInterrupt:
pass
interrupt_thread.join()
assert DagsterEventType.STEP_FAILURE in result_types
assert DagsterEventType.PIPELINE_FAILURE in result_types
def test_existing_scheduler():
def _execute(scheduler_address, instance):
return execute_pipeline(
reconstructable(dask_engine_pipeline),
run_config={
"intermediate_storage": {"filesystem": {}},
"execution": {
"dask": {"config": {"cluster": {"existing": {"address": scheduler_address}}}}
},
},
instance=instance,
)
async def _run_test():
with instance_for_test() as instance:
async with Scheduler() as scheduler:
async with Worker(scheduler.address) as _:
result = await asyncio.get_event_loop().run_in_executor(
None, _execute, scheduler.address, instance
)
assert result.success
assert result.result_for_solid("simple").output_value() == 1
asyncio.get_event_loop().run_until_complete(_run_test())
|
test_event_log.py
|
import os
import sys
import time
import traceback
from contextlib import contextmanager
import pytest
import sqlalchemy
from dagster import seven
from dagster.core.definitions import AssetMaterialization, ExpectationResult
from dagster.core.errors import DagsterEventLogInvalidForRun
from dagster.core.events import (
DagsterEvent,
DagsterEventType,
EngineEventData,
StepExpectationResultData,
StepMaterializationData,
)
from dagster.core.events.log import DagsterEventRecord
from dagster.core.execution.plan.objects import StepFailureData, StepSuccessData
from dagster.core.storage.event_log import (
ConsolidatedSqliteEventLogStorage,
InMemoryEventLogStorage,
SqlEventLogStorageMetadata,
SqlEventLogStorageTable,
SqliteEventLogStorage,
)
from dagster.core.storage.sql import create_engine
from dagster.seven import multiprocessing
@contextmanager
def create_in_memory_event_log_storage():
yield InMemoryEventLogStorage()
@contextmanager
def create_sqlite_run_event_logstorage():
with seven.TemporaryDirectory() as tmpdir_path:
yield SqliteEventLogStorage(tmpdir_path)
@contextmanager
def create_consolidated_sqlite_run_event_log_storage():
with seven.TemporaryDirectory() as tmpdir_path:
yield ConsolidatedSqliteEventLogStorage(tmpdir_path)
event_storage_test = pytest.mark.parametrize(
'event_storage_factory_cm_fn',
[
create_in_memory_event_log_storage,
create_sqlite_run_event_logstorage,
create_consolidated_sqlite_run_event_log_storage,
],
)
@event_storage_test
def test_init_log_storage(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
if isinstance(storage, InMemoryEventLogStorage):
assert not storage.is_persistent
elif isinstance(storage, (SqliteEventLogStorage, ConsolidatedSqliteEventLogStorage)):
assert storage.is_persistent
else:
raise Exception("Invalid event storage type")
@event_storage_test
def test_log_storage_run_not_found(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
assert storage.get_logs_for_run('bar') == []
@event_storage_test
def test_event_log_storage_store_events_and_wipe(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
assert len(storage.get_logs_for_run('foo')) == 0
storage.store_event(
DagsterEventRecord(
None,
'Message2',
'debug',
'',
'foo',
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
'nonce',
event_specific_data=EngineEventData.in_process(999),
),
)
)
assert len(storage.get_logs_for_run('foo')) == 1
assert storage.get_stats_for_run('foo')
storage.wipe()
assert len(storage.get_logs_for_run('foo')) == 0
@event_storage_test
def test_event_log_storage_store_with_multiple_runs(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
runs = ['foo', 'bar', 'baz']
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 0
storage.store_event(
DagsterEventRecord(
None,
'Message2',
'debug',
'',
run_id,
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.STEP_SUCCESS.value,
'nonce',
event_specific_data=StepSuccessData(duration_ms=100.0),
),
)
)
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 1
assert storage.get_stats_for_run(run_id).steps_succeeded == 1
storage.wipe()
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 0
@event_storage_test
def test_event_log_storage_watch(event_storage_factory_cm_fn):
def evt(name):
return DagsterEventRecord(
None,
name,
'debug',
'',
'foo',
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
'nonce',
event_specific_data=EngineEventData.in_process(999),
),
)
with event_storage_factory_cm_fn() as storage:
watched = []
watcher = lambda x: watched.append(x) # pylint: disable=unnecessary-lambda
assert len(storage.get_logs_for_run('foo')) == 0
storage.store_event(evt('Message1'))
assert len(storage.get_logs_for_run('foo')) == 1
assert len(watched) == 0
storage.watch('foo', 0, watcher)
storage.store_event(evt('Message2'))
storage.store_event(evt('Message3'))
storage.store_event(evt('Message4'))
attempts = 10
while len(watched) < 3 and attempts > 0:
time.sleep(0.1)
attempts -= 1
storage.end_watch('foo', watcher)
time.sleep(0.3) # this value scientifically selected from a range of attractive values
storage.store_event(evt('Message5'))
assert len(storage.get_logs_for_run('foo')) == 5
assert len(watched) == 3
storage.delete_events('foo')
assert len(storage.get_logs_for_run('foo')) == 0
assert len(watched) == 3
@event_storage_test
def test_event_log_storage_pagination(event_storage_factory_cm_fn):
def evt(name):
return DagsterEventRecord(
None,
name,
'debug',
'',
'foo',
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
'nonce',
event_specific_data=EngineEventData.in_process(999),
),
)
with event_storage_factory_cm_fn() as storage:
storage.store_event(evt('Message_0'))
storage.store_event(evt('Message_1'))
storage.store_event(evt('Message_2'))
assert len(storage.get_logs_for_run('foo')) == 3
assert len(storage.get_logs_for_run('foo', -1)) == 3
assert len(storage.get_logs_for_run('foo', 0)) == 2
assert len(storage.get_logs_for_run('foo', 1)) == 1
assert len(storage.get_logs_for_run('foo', 2)) == 0
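# Cursor semantics as inferred from the assertions above (not from API docs):
# -1 returns all events, and cursor N returns only events after position N,
# so with three stored events a cursor of 2 yields nothing.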
@event_storage_test
def test_event_log_delete(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
assert len(storage.get_logs_for_run('foo')) == 0
storage.store_event(
DagsterEventRecord(
None,
'Message2',
'debug',
'',
'foo',
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
'nonce',
event_specific_data=EngineEventData.in_process(999),
),
)
)
assert len(storage.get_logs_for_run('foo')) == 1
assert storage.get_stats_for_run('foo')
storage.delete_events('foo')
assert len(storage.get_logs_for_run('foo')) == 0
@event_storage_test
def test_event_log_get_stats_without_start_and_success(event_storage_factory_cm_fn):
# When an event log doesn't have a PIPELINE_START or PIPELINE_SUCCESS | PIPELINE_FAILURE event,
# we want to ensure storage.get_stats_for_run(...) doesn't throw an error.
with event_storage_factory_cm_fn() as storage:
assert len(storage.get_logs_for_run('foo')) == 0
assert storage.get_stats_for_run('foo')
def test_filesystem_event_log_storage_run_corrupted():
with seven.TemporaryDirectory() as tmpdir_path:
storage = SqliteEventLogStorage(tmpdir_path)
# URL begins sqlite:///
# pylint: disable=protected-access
with open(os.path.abspath(storage.conn_string_for_run_id('foo')[10:]), 'w') as fd:
fd.write('some nonsense')
with pytest.raises(sqlalchemy.exc.DatabaseError):
storage.get_logs_for_run('foo')
def test_filesystem_event_log_storage_run_corrupted_bad_data():
with seven.TemporaryDirectory() as tmpdir_path:
storage = SqliteEventLogStorage(tmpdir_path)
SqlEventLogStorageMetadata.create_all(create_engine(storage.conn_string_for_run_id('foo')))
with storage.connect('foo') as conn:
event_insert = SqlEventLogStorageTable.insert().values( # pylint: disable=no-value-for-parameter
run_id='foo', event='{bar}', dagster_event_type=None, timestamp=None
)
conn.execute(event_insert)
with pytest.raises(DagsterEventLogInvalidForRun):
storage.get_logs_for_run('foo')
SqlEventLogStorageMetadata.create_all(create_engine(storage.conn_string_for_run_id('bar')))
with storage.connect('bar') as conn: # pylint: disable=protected-access
event_insert = SqlEventLogStorageTable.insert().values( # pylint: disable=no-value-for-parameter
run_id='bar', event='3', dagster_event_type=None, timestamp=None
)
conn.execute(event_insert)
with pytest.raises(DagsterEventLogInvalidForRun):
storage.get_logs_for_run('bar')
def cmd(exceptions, tmpdir_path):
storage = SqliteEventLogStorage(tmpdir_path)
try:
with storage.connect('foo'):
pass
except Exception as exc: # pylint: disable=broad-except
exceptions.put(exc)
exc_info = sys.exc_info()
traceback.print_tb(exc_info[2])
def test_concurrent_sqlite_event_log_connections():
exceptions = multiprocessing.Queue()
with seven.TemporaryDirectory() as tmpdir_path:
ps = []
for _ in range(5):
ps.append(multiprocessing.Process(target=cmd, args=(exceptions, tmpdir_path)))
for p in ps:
p.start()
j = 0
for p in ps:
p.join()
j += 1
assert j == 5
excs = []
while not exceptions.empty():
excs.append(exceptions.get())
assert not excs, excs
@event_storage_test
def test_event_log_step_stats(event_storage_factory_cm_fn):
# When an event log doesn't have a PIPELINE_START or PIPELINE_SUCCESS | PIPELINE_FAILURE event,
# we want to ensure storage.get_stats_for_run(...) doesn't throw an error.
run_id = 'foo'
with event_storage_factory_cm_fn() as storage:
for record in _stats_records(run_id=run_id):
storage.store_event(record)
step_stats = storage.get_step_stats_for_run(run_id)
assert len(step_stats) == 4
a_stats = [stats for stats in step_stats if stats.step_key == 'A'][0]
assert a_stats.step_key == 'A'
assert a_stats.status.value == 'SUCCESS'
assert a_stats.end_time - a_stats.start_time == 100
b_stats = [stats for stats in step_stats if stats.step_key == 'B'][0]
assert b_stats.step_key == 'B'
assert b_stats.status.value == 'FAILURE'
assert b_stats.end_time - b_stats.start_time == 50
c_stats = [stats for stats in step_stats if stats.step_key == 'C'][0]
assert c_stats.step_key == 'C'
assert c_stats.status.value == 'SKIPPED'
assert c_stats.end_time - c_stats.start_time == 25
d_stats = [stats for stats in step_stats if stats.step_key == 'D'][0]
assert d_stats.step_key == 'D'
assert d_stats.status.value == 'SUCCESS'
assert d_stats.end_time - d_stats.start_time == 150
assert len(d_stats.materializations) == 3
assert len(d_stats.expectation_results) == 2
def _stats_records(run_id):
now = time.time()
return [
_event_record(run_id, 'A', now - 325, DagsterEventType.STEP_START),
_event_record(
run_id,
'A',
now - 225,
DagsterEventType.STEP_SUCCESS,
StepSuccessData(duration_ms=100000.0),
),
_event_record(run_id, 'B', now - 225, DagsterEventType.STEP_START),
_event_record(
run_id,
'B',
now - 175,
DagsterEventType.STEP_FAILURE,
StepFailureData(error=None, user_failure_data=None),
),
_event_record(run_id, 'C', now - 175, DagsterEventType.STEP_START),
_event_record(run_id, 'C', now - 150, DagsterEventType.STEP_SKIPPED),
_event_record(run_id, 'D', now - 150, DagsterEventType.STEP_START),
_event_record(
run_id,
'D',
now - 125,
DagsterEventType.STEP_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key='mat_1')),
),
_event_record(
run_id,
'D',
now - 100,
DagsterEventType.STEP_EXPECTATION_RESULT,
StepExpectationResultData(ExpectationResult(success=True, label='exp 1')),
),
_event_record(
run_id,
'D',
now - 75,
DagsterEventType.STEP_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key='mat_2')),
),
_event_record(
run_id,
'D',
now - 50,
DagsterEventType.STEP_EXPECTATION_RESULT,
StepExpectationResultData(ExpectationResult(success=False, label='exp 2')),
),
_event_record(
run_id,
'D',
now - 25,
DagsterEventType.STEP_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key='mat_3')),
),
_event_record(
run_id, 'D', now, DagsterEventType.STEP_SUCCESS, StepSuccessData(duration_ms=150000.0)
),
]
def _event_record(run_id, step_key, timestamp, event_type, event_specific_data=None):
pipeline_name = 'pipeline_name'
return DagsterEventRecord(
None,
'',
'debug',
'',
run_id,
timestamp,
step_key=step_key,
pipeline_name=pipeline_name,
dagster_event=DagsterEvent(
event_type.value,
pipeline_name,
step_key=step_key,
event_specific_data=event_specific_data,
),
)
|
main.py
|
import argparse
from multiprocessing import Process
from pathlib import Path
import yaml
from stable_baselines3.common.logger import configure
from demac.src.demac.demac_agent_env_wrapper import AgentEnvWrapper
from demac.src.demac.demac_coordinator import Coordinator
from sample_envs.gridnav.grid import Grid
from sample_envs.gridnav.gridnav import GridNav
from sample_envs.meteor.meteor_env import MeteorEnv
from sample_envs.trivial.trivial_env import TrivialEnv
from stable_baselines3 import A2C
from stable_baselines3.ppo.policies import MlpPolicy
from stable_baselines3.common.callbacks import CheckpointCallback
parser = argparse.ArgumentParser(description='Check if we are training or testing.')
parser.add_argument('--test', dest='model_name', type=str, default=None)
parser.add_argument('--env', dest='env', type=str, default='trivial')
parser.add_argument('--exp_path', dest='exp_path', type=str, default='exp0/')
args = parser.parse_args()
# Map the correct sample environment to user argument
env_map = {
'trivial': ('sample_envs/trivial', TrivialEnv),
'gridnav': ('sample_envs/gridnav', Grid) if not args.model_name else ('sample_envs/gridnav', GridNav),
'meteor': ('sample_envs/meteor', MeteorEnv),
}
env_path, env = env_map[args.env]
configs = yaml.load(open(env_path + '/config/config.yaml', 'r'), Loader=yaml.SafeLoader)
env = env()
# Start up the DeMAC coordinator, linking the shared environment to the coordinator
coordinator = Coordinator(env, exp_path=args.exp_path, test=args.model_name is not None)
# Initialize agents and their wrapper environments
envs, agents = [], []
for i in range(configs['num_agents']):
envs.append(AgentEnvWrapper(str(i), coordinator))
if args.model_name:
# Load an existing agent model
agents.append(A2C.load(f'./{args.exp_path}/{str(i)}/models/{args.model_name}', envs[i], device='cpu'))
else:
# Initialize a new agent model, and set the logger path
model = A2C(MlpPolicy, envs[i], verbose=1, device='cpu')
model.set_logger(configure(folder=envs[i].agent_path))
agents.append(model)
# Start up the coordinator server to start listening for agent requests
coordinator.start()
if args.model_name:
# Evaluate the given model
env.evaluate(num_eps=1000, agents=agents, envs=envs)
else:
# Begin agent learning from scratch
for i, agent in enumerate(agents):
model_path = Path(f'./{args.exp_path}/{i}/models/')
        model_path.mkdir(parents=True, exist_ok=True)
checkpoint_callback = CheckpointCallback(save_freq=1e4, save_path=model_path,
name_prefix='rl_model')
p = Process(target=agent.learn, args=(1e7,), kwargs={'callback': [checkpoint_callback]})
p.start()
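    # NOTE: the learner processes above are fire-and-forget; if the main
    # process should block until training finishes, collect them in a list
    # and call p.join() on each.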
|
websocket_manager.py
|
import json
import time
from threading import Thread, Lock
from queue import Queue
from typing import Callable
from gzip import decompress
from websocket import WebSocketApp
from confluent_kafka import Producer
class WebsocketManager():
_CONNECT_TIMEOUT_S = 5
def __init__(self, url: str, subscribe: Callable, unsubscribe: Callable):
"""
subscribe is a function that's called right after the websocket connects.
unsubscribe is a function that's called just before the websocket disconnects.
both subscribe and unsubscribe MUST have one argument, which is an instance of
WebsocketManager (see KrakenWsManagerFactory in ws_factories.py for an example).
"""
self.connect_lock = Lock()
self.ws = None
self.queue = Queue()
self.url = url
self.subscribe = subscribe
self.unsubscribe = unsubscribe
self.connect()
conf = {
'bootstrap.servers': 'SSL://kafka-16054d72-gda-3ad8.aivencloud.com:18921',
'security.protocol' : 'SSL',
'client.id': 'kafka-python-producer',
'ssl.certificate.location': '../../jay.cert',
'ssl.key.location': '../../jay.key',
'ssl.ca.location': '../../ca-aiven-cert.pem',
}
self.producer = Producer(conf)
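        # NOTE: the broker address and TLS key/cert paths in conf above are
        # deployment-specific; the relative paths must exist from the working
        # directory for the producer to start.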
def _acked(self, err, msg):
if err is not None:
print("Failed to deliver message: {}".format(err))
else:
print("Produced record to topic {} partition [{}] @ offset {}"
.format(msg.topic(), msg.partition(), msg.offset()))
    def get_msg(self):
        """
        Retrieves a message from the front of the queue.
        NOTE: The message received has an extra field "receive_timestamp", which
        is the UTC timestamp of when the message was received in milliseconds.
        (As currently written, _on_message forwards messages to Kafka and never
        enqueues them, so this call blocks until the queue is filled elsewhere.)
        """
        return self.queue.get()
def _on_message(self, ws, message):
message = json.loads(message)
if isinstance(message, dict):
message["receive_timestamp"] = int(time.time()*10**3)
try:
self.producer.produce(f"test-ftx-raw", value=json.dumps(message), on_delivery=self._acked)
self.producer.poll(0)
except Exception as e:
print("An error occurred while producing: %s" % e)
    def get_q_size(self):
        """Prints the current size of the queue"""
        print(f"Queue Backlog: {self.queue.qsize()}")
def send(self, message):
"""Sends a message over the websocket"""
self.connect()
self.ws.send(message)
def send_json(self, message):
"""Sends a json message over the websocket"""
self.send(json.dumps(message))
def _connect(self):
"""Creates a websocket app and connects"""
assert not self.ws, "ws should be closed before attempting to connect"
self.ws = WebSocketApp(
self.url,
on_message=self._wrap_callback(self._on_message),
on_close=self._wrap_callback(self._on_close),
on_error=self._wrap_callback(self._on_error),
)
wst = Thread(target=self._run_websocket, args=(self.ws,))
wst.daemon = True
wst.start()
# Wait for socket to connect
ts = time.time()
while self.ws and (not self.ws.sock or not self.ws.sock.connected):
if time.time() - ts > self._CONNECT_TIMEOUT_S:
self.ws = None
raise Exception(
f"Failed to connect to websocket url {self._get_url()}")
time.sleep(0.1)
def _wrap_callback(self, f):
"""Wrap websocket callback"""
def wrapped_f(ws, *args, **kwargs):
if ws is self.ws:
try:
f(ws, *args, **kwargs)
except Exception as e:
raise Exception(f'Error running websocket callback: {e}')
return wrapped_f
def _run_websocket(self, ws):
""""Runs the websocket app"""
try:
ws.run_forever(ping_interval=30)
except Exception as e:
raise Exception(f'Unexpected error while running websocket: {e}')
finally:
pass
# self._reconnect(ws)
def _reconnect(self, ws):
"""Closes a connection and attempts to reconnect"""
assert ws is not None, '_reconnect should only be called with an existing ws'
if ws is self.ws:
self.ws = None
ws.close()
self.connect()
def connect(self):
"""Connects to the websocket"""
if self.ws:
return
with self.connect_lock:
while not self.ws:
self._connect()
if self.ws:
self.subscribe(self)
return
    def resubscribe(self):
        # subscribe/unsubscribe take the WebsocketManager instance (see __init__ docstring)
        self.unsubscribe(self)
        self.subscribe(self)
def _on_close(self, ws, a, b):
print("Connection Closed")
self.unsubscribe(self)
self._reconnect(ws)
def _on_error(self, ws, error):
print(f"websocket error: {error}")
self._reconnect(ws)
def reconnect(self) -> None:
if self.ws is not None:
self._reconnect(self.ws)
|
lichessbot.py
|
import argparse
import chess
from chess import engine
from chess import variant
import chess.polyglot
import model
import json
import lichess
import logging
import multiprocessing
from multiprocessing import Process
import signal
import backoff
from config import load_config
from requests.exceptions import HTTPError, ReadTimeout
import os
import time
logger = logging.getLogger(__name__)
from http.client import RemoteDisconnected
terminated = False
def signal_handler(signal, frame):
global terminated
logger.debug("Recieved SIGINT. Terminating client.")
terminated = True
signal.signal(signal.SIGINT, signal_handler)
def is_final(exception):
return isinstance(exception, HTTPError) and exception.response.status_code < 500
def upgrade_account(li):
if li.upgrade_to_bot_account() is None:
return False
logger.info("Succesfully upgraded to Bot Account!")
return True
def watch_control_stream(control_queue, li):
while not terminated:
try:
response = li.get_event_stream()
lines = response.iter_lines()
for line in lines:
if line:
event = json.loads(line.decode('utf-8'))
control_queue.put_nowait(event)
logger.info(event)
        except Exception:
            logger.info("Network error: cannot get data from lichess! Check your network connection or try again in a few minutes.")
def start(li, user_profile, config):
challenge_config = config["challenge"]
logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
    control_queue = multiprocessing.Manager().Queue()
    control_stream = Process(target=watch_control_stream, args=[control_queue, li])
    control_stream.start()
    while not terminated:
        event = control_queue.get()
if event["type"] == "terminated":
break
elif event["type"] == "challenge":
chlng = model.Challenge(event["challenge"])
if chlng.is_supported(challenge_config):
try:
logger.info("Accept {}".format(chlng))
response = li.accept_challenge(chlng.id)
logger.info("Challenge Accept Response :{}".format(response))
except (HTTPError, ReadTimeout) as exception:
if isinstance(exception, HTTPError) and exception.response.status_code == 404: # ignore missing challenge
logger.info(" Skip missing :{}".format(chlng))
else:
try:
li.decline_challenge(chlng.id)
logger.info(" Decline :{}".format(chlng))
except:
pass
elif event["type"] == "gameStart":
play_game(li, event["game"]["id"], user_profile, config)
logger.info("Terminated")
control_stream.terminate()
control_stream.join()
ponder_results = {}
@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, user_profile, config):
response = li.get_game_stream(game_id)
lines = response.iter_lines()
#Initial response of stream will be the full game info. Store it
initial_state = json.loads(next(lines).decode('utf-8'))
game = model.Game(initial_state, user_profile["username"], li.baseUrl, config.get("abort_time", 20))
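    # Time-management heuristic (a reading of the code below, not a documented
    # spec): budget roughly 1/85th of the clock per move, clamped to [0.3s, 10s].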
timelim=game.state["btime"]/1000
timelim=timelim/60
timep=round(timelim/85*60,1)
if timep>10:
timep=10
elif timep<0.3:
timep=0.3
board = setup_board(game)
cfg = config["engine"]
if type(board).uci_variant=="chess":
engine_path = os.path.join(cfg["dir"], cfg["name"])
bookname="book.bin"
else:
engine_path = os.path.join(cfg["dir"], cfg["variantname"])
bookname="bookchen.bin"
engineeng = engine.SimpleEngine.popen_uci(engine_path)
engineeng.configure({'Threads':5})
engineeng.configure({'Hash':120})
try:
engineeng.configure({'EvalFile':"nn-517c4f68b5df.nnue"})
except:
pass
engineeng.configure({'Use NNUE':True})
logger.info("Game Details :{}".format(game))
delay_seconds = config.get("rate_limiting_delay", 0)/1000
if is_engine_move(game, board.move_stack) and not is_game_over(game):
with chess.polyglot.open_reader(bookname) as reader:
movesob=[]
weight=[]
for entry in reader.find_all(board):
movesob.append(entry.move)
weight.append(entry.weight)
if len(weight)==0 or max(weight)<9:
move=engineeng.play(board,engine.Limit(time=timep))
board.push(move.move)
li.make_move(game.id, move.move)
time.sleep(delay_seconds)
else:
move=movesob[weight.index(max(weight))]
board.push(move)
li.make_move(game.id, move)
with chess.polyglot.open_reader(bookname) as reader:
while not terminated:
try:
binary_chunk = next(lines)
except(StopIteration):
break
upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None
u_type = upd["type"] if upd else "ping"
if not board.is_game_over():
if u_type == "gameState":
game.state=upd
moves = upd["moves"].split()
board = update_board(board, moves[-1])
if not is_game_over(game) and is_engine_move(game, moves):
moves=[]
weight=[]
for entry in reader.find_all(board):
moves.append(entry.move)
weight.append(entry.weight)
if len(weight)==0 or max(weight)<9:
if game.is_white:
timelim=game.state["wtime"]/1000
else:
timelim=game.state["btime"]/1000
divtime=85-int(len(board.move_stack)/2)
if divtime<1:
timep=1
else:
timep=round(timelim/divtime,1)
if timep>10:
timep=10
elif timep<0.3:
timep=0.3
move=engineeng.play(board,engine.Limit(time=timep))
board.push(move.move)
li.make_move(game.id, move.move)
time.sleep(delay_seconds)
else:
move=moves[weight.index(max(weight))]
board.push(move)
li.make_move(game.id, move)
if board.turn == chess.WHITE:
game.ping(config.get("abort_time", 20), (upd["wtime"] + upd["winc"]) / 1000 + 60)
else:
game.ping(config.get("abort_time", 20), (upd["btime"] + upd["binc"]) / 1000 + 60)
elif u_type == "ping":
if game.should_abort_now():
logger.info(" Aborting {} by lack of activity".format(game.url()))
li.abort(game.id)
break
elif game.should_terminate_now():
logger.info(" Terminating {} by lack of activity".format(game.url()))
if game.is_abortable():
li.abort(game.id)
break
else:
break
logger.info("game over")
engineeng.quit()
def is_white_to_move(game, moves):
return len(moves) % 2 == (0 if game.white_starts else 1)
def setup_board(game):
if game.variant_name.lower() == "chess960":
board = chess.Board(game.initial_fen, chess960=True)
elif game.variant_name == "From Position":
board = chess.Board(game.initial_fen)
else:
VariantBoard = variant.find_variant(game.variant_name)
board = VariantBoard()
moves = game.state["moves"].split()
for move in moves:
board = update_board(board, move)
return board
def is_engine_move(game, moves):
return game.is_white == is_white_to_move(game, moves)
def is_game_over(game):
return game.state["status"] != "started"
def update_board(board, move):
uci_move = chess.Move.from_uci(move)
if board.is_legal(uci_move):
board.push(uci_move)
else:
logger.debug('Ignoring illegal move {} on board {}'.format(move, board.fen()))
return board
def intro():
return r"""
. _/|
. // o\
. || ._) lichess-bot
. //__\
. )___( Play on Lichess with a bot
"""
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Play on Lichess with a bot')
parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.')
parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.')
parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)')
parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile,
format="%(asctime)-15s: %(message)s")
logger.info(intro())
CONFIG = load_config(args.config or "./config.yml")
li = lichess.Lichess(CONFIG["token"], CONFIG["url"], "1.1.5")
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
logger.info("Welcome {}!".format(username))
if is_bot is False:
is_bot = upgrade_account(li)
if is_bot:
start(li, user_profile, CONFIG)
else:
logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
|
softvolume_test.py
|
# -*- coding: utf-8 -*-
import vaex.dataset as dataset
import numpy as np
import numpy
import math
import vaex.utils
import matplotlib.pyplot as plt
import scipy.ndimage
import matplotlib.animation as animation
import matplotlib
import time
def rotz(angle):
matrix = np.identity(4)
matrix[0,0] = np.cos(angle)
matrix[0,1] = np.sin(angle)
matrix[1,0] = -np.sin(angle)
matrix[1,1] = np.cos(angle)
return np.matrix(matrix).T
def rotx(angle):
matrix = np.identity(4)
matrix[1,1] = np.cos(angle)
matrix[1,2] = np.sin(angle)
matrix[2,1] = -np.sin(angle)
matrix[2,2] = np.cos(angle)
return np.matrix(matrix).T
def scale(factor):
matrix = np.identity(4)
for i in range(3):
matrix[i,i] = float(factor)
return np.matrix(matrix).T
def translate(x,y,z):
matrix = np.identity(4)
matrix[3,0:3] = x, y, z
return np.matrix(matrix).T
def proj(size):
matrix = np.identity(4)
#return np.matrix(matrix).T
right = float(size)
left = 0
top = float(N)
bottom = 0
far = 1.
near = 0
matrix[0][0] = 2./(right-left)
matrix[1][1] = 2./(top-bottom)
matrix[2][2] = -2./(far-near)
# col3 is only when left, bottom and near != 0
#matrix[3][0] = - (right+left)/(right-left)
return np.matrix(matrix).T
N = 256
N2d = 256
#m = scale_matrix(1./N, [N/2., N/2., N/2.])
#m = scale_matrix(1./N)
#m = rotation_matrix(np.radians(30), [0, 1, 0]) * m
if 0:  # dead debug block; 'm' is only defined if the commented-out lines above are restored
    print("t", translate(-1, -1, -1))
    print("s", scale(2./N))
    print("p", proj(N))
    print(np.dot(m, [0, 0, 0, 1]))
    print(np.dot(m, [N/2, N/2, N/2, 1]))
    print(np.dot(m, (N, N, N, 1)))
    print(np.dot(m, (N, N, 0, 1)))
#print rotation_matrix(np.radians(30), [0, 1, 0])
colormaps = []
colormap_pixmap = {}
colormaps_processed = False
cols = []
for x in np.linspace(0,1, 256):
rcol = 0.237 - 2.13*x + 26.92*x**2 - 65.5*x**3 + 63.5*x**4 - 22.36*x**5
gcol = ((0.572 + 1.524*x - 1.811*x**2)/(1 - 0.291*x + 0.1574*x**2))**2
bcol = 1/(1.579 - 4.03*x + 12.92*x**2 - 31.4*x**3 + 48.6*x**4 - 23.36*x**5)
cols.append((rcol, gcol, bcol))
name = 'PaulT_plusmin'
cm_plusmin = matplotlib.colors.LinearSegmentedColormap.from_list(name, cols)
matplotlib.cm.register_cmap(name=name, cmap=cm_plusmin)
#data = dataset.Hdf5MemoryMapped("data/dist/Aq-A-2-999-shuffled-fraction.hdf5")
data = dataset.Hdf5MemoryMapped("/home/data/vaex/Aq-A-2-999-shuffled.hdf5")
Nrows = int(1e7)
#x, y, z = [col[:Nrows] for col in [data.columns["x"], data.columns["y"], data.columns["z"]]]
x, y, z = [col for col in [data.columns["x"], data.columns["y"], data.columns["z"]]]
x = x - 54 #x.mean()
y = y - 50 #y.mean()
z = z - 50 #y.mean()
import vaex.histogram
density = np.zeros((N,N,N))
#vaex.histogram.hist3d(x, y, z, density, np.min(x), np.max(x), np.min(y), np.max(y), np.min(z), np.max(z))
w = 10
#vaex.histogram.hist3d(x, y, z, density, np.min(x)+w, np.max(x)-w, np.min(y)+w, np.max(y)-w, np.min(z)+w, np.max(z)-w)
#vaex.histogram.hist3d(x, y, z, density, -w, w, -w, w, -w, w)
import vaex.vaexfast
#for i in range(10):
t_prev = 0
import threading
# NOTE: this local ThreadPool is immediately shadowed by the vaex import below.
class ThreadPool(object):
    def __init__(self, nthreads=8):
        self.nthreads = nthreads
        self.threads = [threading.Thread(target=self.execute, kwargs={"index": i}) for i in range(nthreads)]
        self.semaphore_in = threading.Semaphore(0)
        self.semaphore_out = threading.Semaphore(0)
        for thread in self.threads:
            thread.setDaemon(True)
            thread.start()
    def execute(self, index):
        print("index", index)
        while True:
            self.semaphore_in.acquire()
            self.callable(index)
            self.semaphore_out.release()
    def run_parallel(self, callable):
        self.callable = callable
        for thread in self.threads:
            self.semaphore_in.release()
        for thread in self.threads:
            self.semaphore_out.acquire()
from vaex.multithreading import ThreadPool
thread_pool = ThreadPool(8)
#vaex.vaexfast.histogram3d(x, y, z, None, density, -w, w, -w, w, -w, w)
density_per_thread = np.zeros((thread_pool.nthreads, ) + density.shape)
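# Map-reduce pattern: each worker fills its own slab of density_per_thread,
# and the per-thread grids are summed into a single density cube afterwards.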
def calc_dens(index, i1, i2):
    # write into the per-thread grid, not the shared 'density' cube
    vaex.vaexfast.histogram3d(x[i1:i2], y[i1:i2], z[i1:i2], None, density_per_thread[index], -w, w, -w, w, -w, w)
thread_pool.run_blocks(calc_dens, len(x))
density = np.sum(density_per_thread, axis=0)
#density = np.log10(density + 1)
fig, ax = plt.subplots()
def frame(i):
global image
global t_prev
print "frame", i
angle1 = i / 40. * 2 * math.pi/4#/2
angle2 = i / 80. * 2 * math.pi/4#/2
#angle1, angle2 = 0, 0
m = translate(N2d/2., N2d/2., N2d/2.) * scale(N2d/2.) * rotz((angle1)) * rotx((angle2)) * translate(-1, -1, -1) * scale(2./N)
px = np.array(list(m[2].flat))
py = np.array(list(m[1].flat))
#print px, py
surface = np.zeros((N2d,N2d))
surface_per_thread = np.zeros((thread_pool.nthreads, N2d,N2d))
    block = density.shape[0] // thread_pool.nthreads
#density_per_thread = [np.ascontiguousarray(density[index*block:(index+1)*block,:,:] * 1) for index in range(thread_pool.ntheads)]
#for i in range(8):
# print "shape", i, density_per_thread[index].shape, density_per_thread[index].strides
with vaex.utils.Timer("proj"):
if 0:
vaex.histogram.proj(density, surface, px, py)
else:
projection = np.array(list(px) + list(py))
#density_per_thread = [density[index*block:(index+1)*block,:,:] for index in range(thread_pool.ntrheads)]
def execute(index, i1, i2):
#print "execute", index, density_per_thread[index].shape, density_per_thread[index].strides
#print index, i1, i2
center = np.array([0., 0., index*block])
#vaex.vaexfast.project(density[index*block:(index+1)*block], surface_per_thread[index], projection, center)
vaex.vaexfast.project(density[i1:i2], surface_per_thread[index], projection, center)
#print [(index*block, (index+1)*block) for index in range(thread_pool.ntheads)]
#dsa
if 1:
#thread_pool.run_parallel(execute)
thread_pool.run_blocks(execute, density.shape[0])
else:
center = np.array([0., 0., 6*block])
vaex.vaexfast.project(density_per_thread[0], surface_per_thread[0], projection, center)
surface = surface_per_thread.sum(axis=0)
#print surface
#I = density.sum(axis=1)
I = np.log10(surface+1)
I = scipy.ndimage.gaussian_filter(I, 1.)
mi, ma = I.min(), I.max()
mi = mi + (ma-mi) * 0.4
ma = ma - (ma-mi) * 0.4
if i == 0:
image = plt.imshow(I, cmap='PaulT_plusmin', interpolation='none', vmin=mi, vmax=ma)
t_prev = time.time()
else:
t_now = time.time()
print "fps", 1/(t_now - t_prev)
t_prev = t_now
image.set_data(I)
return [image]
#plt.show()
ax.hold(False)
ani = animation.FuncAnimation(fig, frame, 10000, interval=10, blit=True)
plt.show()
#data = dict(density=(density, "counts"))
#bbox = np.array([[np.min(x), np.max(x)], [np.min(y), np.max(y)], [np.min(z), np.max(z)]])
|
eval_runner.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bypass TPUEstimator for ResNet-50 Eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.data.util import nest as data_nest
FLAGS = flags.FLAGS
_INITIAL_LOSS = 1e7
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=10):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
ops = op_fn()
if not isinstance(ops, list):
ops = [ops]
with tf.control_dependencies(ops):
return i + 1
return tf.while_loop(
lambda i: tf.less(i, n),
computation, [tf.constant(0)],
parallel_iterations=parallel_iterations)
def device_for_host(task=0, cpu=0):
job_name = FLAGS.tpu_job_name or "tpu_worker"
return "/job:%s/task:%d/device:CPU:%d" % (job_name, task, cpu)
def device_for_tpu_core(task=0, core=0):
job_name = FLAGS.tpu_job_name or "tpu_worker"
return "/job:%s/task:%d/device:TPU_REPLICATED_CORE:%d" % (job_name, task,
core)
def tpu_ordinal_fn(shard_index_in_host):
"""Return the TPU ordinal associated with a shard.
Required because the enqueue ops are placed on CPU.
Args:
shard_index_in_host: the shard index
Returns:
The ordinal of the TPU device the shard's infeed should be placed on.
"""
return shard_index_in_host % FLAGS.num_cores
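# Example (illustrative): with FLAGS.num_cores == 8, shard 10 is fed to TPU
# ordinal 10 % 8 == 2.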
class EvalRunner(object):
"""Run Eval via direct session.run calls eliminating init and compilation
overheads from TPU Estimator.
"""
def __init__(self, input_fn, model_fn, params, num_steps):
self.feature_structure = {}
self.loss = None
self.enqueue_ops = None
self.metric_initializer = None
self.iterator = None
self.batch_size = params["batch_size"]
with tf.Graph().as_default() as self.graph:
self.build_model(params, input_fn, model_fn, num_steps)
self.tpu_init = tpu.initialize_system()
initializer = tf.global_variables_initializer()
self.tpu_shutdown = tpu.shutdown_system()
self.local_initializer = tf.local_variables_initializer()
self.saver = tf.train.Saver()
cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu or FLAGS.master)
self.sess = tf.Session(cluster_resolver.get_master(), graph=self.graph)
self.sess.run(self.tpu_init)
self.sess.run(initializer)
self.sess.run(self.local_initializer)
self.sess.run(self.iterator.initializer)
def build_model(self, params, input_fn, model_fn, num_steps):
"""Build the TPU model and infeed enqueue ops."""
iparams = {}
iparams["batch_size"] = params["batch_size"] // FLAGS.num_cores
def get_tpu_step(mparams):
"""Get the TPU graph generation function."""
def tpu_step(loss, *args):
"""Generate the TPU graph."""
del loss
unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
args)
features = unflattened_inputs["features"]
labels = unflattened_inputs["labels"]
estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.EVAL,
mparams)
loss = estimator_spec.loss
self.eval_metrics = estimator_spec.eval_metrics
self.eval_tensors = estimator_spec.eval_metrics[1]
with tf.device(device_for_tpu_core()):
outfeed_enqueue_ops = tpu.outfeed_enqueue_tuple(self.eval_tensors)
with tf.control_dependencies([outfeed_enqueue_ops]):
return tf.identity(loss)
return tpu_step
infeed_queue = []
def get_enqueue_ops_fn():
"""Generate the enqueue ops graph function."""
def enqueue_ops_fn():
"""Generate the infeed enqueue ops graph."""
per_host_sharded_inputs = []
control_deps = []
with tf.device(device_for_host()):
for _ in range(FLAGS.num_cores):
with tf.control_dependencies(control_deps):
features, labels = self.iterator.get_next()
self.feature_structure["features"] = features
self.feature_structure["labels"] = labels
flattened_inputs = data_nest.flatten(self.feature_structure)
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed = tpu.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_fn)
return enqueue_ops_fn
with tf.device(device_for_host()):
dataset = input_fn(iparams)
dataset = dataset.cache() # Cache the fully-generated eval dataset.
dataset = dataset.repeat() # Repeat indefinitely for unknown # of evals.
self.iterator = dataset.make_initializable_iterator()
self.enqueue_ops = wrap_computation_in_while_loop(
get_enqueue_ops_fn(), n=num_steps, parallel_iterations=1)
tpu_step = get_tpu_step(params)
@tpu_function.on_device_training_loop
def tpu_loop():
return tpu.repeat(
num_steps, tpu_step, [_INITIAL_LOSS], infeed_queue=infeed_queue[0])
def create_dequeue_ops():
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for v in self.eval_tensors:
dequeue_ops.append([])
tensor_dtypes.append(v.dtype)
tensor_shapes.append(v.shape)
tf.logging.info("appending %s" % v.name)
for i in range(FLAGS.num_cores):
with tf.device(device_for_host()):
outfeed_tensors = tpu.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=i)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
for j in range(len(outfeed_tensors)):
dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
return dequeue_ops
(self.loss,) = tpu.shard(
tpu_loop,
inputs=[],
num_shards=FLAGS.num_cores,
outputs_from_all_shards=False)
self.dequeue_ops = create_dequeue_ops()
with tf.device(device_for_host()):
metrics = self.eval_metrics[0](*self.dequeue_ops)
metric_update_ops = []
metric_value_ops = {}
for (k, v) in metrics.items():
# print("k: ", k)
# print("v: ", v)
metric_update_ops.append(v[1])
metric_value_ops[k] = v[0]
self.metric_update_ops = metric_update_ops
self.metric_value_ops = metric_value_ops
self.metric_initializer = tf.variables_initializer(
tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
def eval(self, num_steps, checkpoint_path):
"""Run the Eval steps on the TPU device.
Args:
num_steps: number of steps to run eval
checkpoint_path: path to the checkpoint directory
Returns:
A dictionary of evaluation results.
"""
start = time.time()
self.sess.run(self.metric_initializer)
self.saver.restore(self.sess, checkpoint_path)
eval_results = {}
def outfeed_thread_fn():
tf.logging.info("start dequeue ops")
for i in range(num_steps):
_ = self.sess.run(self.metric_update_ops)
# Compute eval metrics
session_out = self.sess.run(self.metric_value_ops)
eval_results["top_1_accuracy"] = session_out["top_1_accuracy"]
def infeed_thread_fn(sess, enqueue_ops):
sess.run([enqueue_ops])
infeed_thread = threading.Thread(
target=infeed_thread_fn, args=(self.sess, self.enqueue_ops))
infeed_thread.start()
outfeed_thread = threading.Thread(target=outfeed_thread_fn)
outfeed_thread.start()
tf.logging.info("Starting Eval on %d steps batch size %d" %
(num_steps, self.batch_size))
loss = self.sess.run([self.loss])
tf.logging.info("Eval Loss = {}".format(loss))
infeed_thread.join()
outfeed_thread.join()
end = time.time()
tf.logging.info("Eval performance: step time {} sec {} examples/sec".format(
end - start, self.batch_size / (end - start)))
return eval_results
|
schedular.py
|
"""
Copyright (c) 2020 OneUpPotato
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from time import time, sleep
from datetime import timedelta
from threading import Thread
from utils.helpers import get_current_utc
class Schedular:
def __init__(self, bot) -> None:
self.bot = bot
# Check for posts to close every 300 seconds (5 minutes).
checking_thread = Thread(target=lambda: self.every(300, self.bot.posts.check_posts))
checking_thread.start()
# Submit a new post every 1800 seconds (30 minutes).
submission_thread = Thread(target=lambda: self.every(1800, self.bot.posts.submit_post))
submission_thread.start()
# Handle the weekly update posts.
weekly_update_thread = Thread(target=self.weekly_post_countdown)
weekly_update_thread.start()
# Post and/or check posts on start depending on the settings.
if self.bot.settings.general["toggles"]["post_on_start"]:
Thread(target=self.bot.posts.submit_post).start()
if self.bot.settings.general["toggles"]["check_posts_on_start"]:
Thread(target=self.bot.posts.check_posts).start()
def every(self, seconds: int, task) -> None:
"""
Runs a task every specific interval.
:param seconds: The amount of seconds between each time the task is run.
"""
next_time = time() + seconds
while True:
sleep(max(0, next_time - time()))
try:
task()
except Exception as e:
if self.bot.sentry:
self.bot.sentry.capture_exception(e)
else:
print(e)
pass
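            # Drift correction: advance next_time by whole multiples of
            # 'seconds' past the current moment, so a slow task run skips
            # missed slots instead of shifting every subsequent run later.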
next_time += (time() - next_time) // seconds * seconds + seconds
def weekly_post_countdown(self) -> None:
"""
Posts the weekly post when it turns Monday 12AM UTC
"""
while True:
time = get_current_utc().replace(hour=0, minute=0, second=0, microsecond=0)
wait_for = ((time + timedelta(days=-time.weekday(), weeks=1)) - time).total_seconds()
sleep(wait_for)
self.bot.posts.submit_weekly_post()
|
manager.py
|
import sys
import signal
import threading
import logging.config
from pathlib import Path
from yaml import safe_load
from pmu.sensor import PMUSensor
from pmu.controller import PMUController
from pmu.connection import PMUConnection
from pmu.bridge import PMUBridge
class PMUManager():
    def __init__(self, arguments=None):
        # 'arguments' is expected to be an argparse.Namespace (vars() fails on a plain dict)
        if arguments is not None:
            for key in vars(arguments):
                self.set_arguments(name=key, arguments=arguments)
        self.load_config()
self.load_config()
signal.signal(signal.SIGTERM, self.signal_catcher)
signal.signal(signal.SIGHUP, self.signal_catcher)
signal.signal(signal.SIGINT, self.signal_catcher)
def set_arguments(self,name=None,arguments=None):
if hasattr(arguments,name):
setattr(self, name ,getattr(arguments,name))
def load_config(self):
if not hasattr(self,'config_file'):
self.config_file = Path(f'{self.base_dir}/config.yml')
if self.config_file.exists():
self.config = safe_load(self.config_file.open())
return self
def init_log(self):
if 'logger' in self.config:
logger = None
logging.config.dictConfig(self.config.get('logger'))
if self.debug and 'debug' in logging.root.manager.loggerDict:
logger = 'debug'
elif 'main' in logging.root.manager.loggerDict:
logger = 'main'
self._log = logging.getLogger(logger)
else:
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(module)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
self._log=logging.getLogger()
self._log.setLevel(logging.DEBUG)
self._log.addHandler(handler)
def load_sensors(self):
''' Add a bit more validation to the function '''
self._sensors=[]
for capability in self.config:
capability_data = self.config.get(capability)
if 'sensors' in capability_data:
for sensor in capability_data.get('sensors'):
sensor = PMUSensor({
**sensor,
**{
'log':self.log,
'location': self.location,
'connection': self.connection,
'capability': capability
}
})
self._sensors.append(sensor)
def start_sensors(self):
self.threads = []
self.exit_thread = threading.Event()
for sensor in self.sensors:
self.stop_threads = False
thread = threading.Thread(target=sensor.start_sensor, args=(self.exit_thread,))
thread.name = f'PMUSensor-{sensor.room}'
thread.start()
self.threads.append(thread)
def stop_sensors(self):
self.log.debug(f'Stopping all sensors.')
if hasattr(self,'threads'):
for thread in self.threads:
self.exit_thread.set()
thread.join()
self.log.debug(f'All sensors stopped.')
def load_connection(self):
if 'connection' in self.config:
self._connection = PMUConnection({
**self.config.get('connection'),
'log': self.log
})
return None
def load_controllers(self):
''' Add more validation to the load sequence. '''
self._controllers=[]
for capability in self.config:
capability_data = self.config.get(capability)
if 'controllers' in capability_data:
for controller in capability_data.get('controllers'):
controller = PMUController({
**controller,
'log':self.log,
'location': self.location,
'connection': self.connection,
'capability': capability
})
self._controllers.append(controller)
def start_controllers(self):
self._controller_threads = []
self._controller_exit = threading.Event()
for controller in self.controllers:
thread = threading.Thread(target=controller.start_controller, args=(self._controller_exit,))
thread.name = f'PMUController-{controller.floor}'
thread.start()
self._controller_threads.append(thread)
def stop_controllers(self):
self.log.debug(f'Stopping all controllers.')
if hasattr(self,'_controller_threads'):
for thread in self._controller_threads:
self._controller_exit.set()
thread.join()
self.log.debug(f'All controllers stopped.')
def load_bridges(self):
''' Add more validation to the load sequence. '''
self._bridges=[]
if 'bridges' in self.config:
for bridge in self.config.get('bridges'):
_bridge_config = self.config.get('bridges').get(bridge) # This is ugly af and need to be fixed.
self.log.info(f"Initialzing bridge '{bridge}.")
_bridge = PMUBridge({**_bridge_config,'manager':self})
self._bridges.append(_bridge)
def start_bridges(self):
self._bridge_threads = []
self._bridge_exit = threading.Event()
for bridge in self.bridges:
thread = threading.Thread(target=bridge.run, args=(self._bridge_exit,))
            thread.name = 'PMUBridge-influxdb'  # TODO: derive the thread name from the bridge instead of hard-coding it
thread.start()
self._bridge_threads.append(thread)
    def stop_bridges(self):
        self.log.debug('Stopping all bridges.')
        if hasattr(self, '_bridge_threads'):
            for thread in self._bridge_threads:
                self._bridge_exit.set()
                thread.join()
        self.log.debug('All bridges stopped.')
    def signal_catcher(self, signalNumber, frame):
        if signalNumber == signal.SIGTERM:
            self.log.info('SIGTERM received! Quitting.')
            self.graceful_exit()
        elif signalNumber == signal.SIGHUP:
            self.log.info('SIGHUP received. Restarting.')
        elif signalNumber == signal.SIGINT:
            self.log.info('SIGINT received. Quitting.')
            self.graceful_exit()
def graceful_exit(self):
self.stop_sensors()
self.stop_controllers()
self.stop_bridges()
@property
def sensors(self):
if not hasattr(self,'_sensors'):
self.load_sensors()
return self._sensors
@property
def controllers(self):
if not hasattr(self,'_controllers'):
self.load_controllers()
return self._controllers
@property
def bridges(self):
if not hasattr(self,'_bridges'):
self.load_bridges()
return self._bridges
@property
def connection(self):
if not hasattr(self,'_connection'):
self.load_connection()
return self._connection
@property
def location(self):
if 'location' in self.config:
return self.config.get('location')
return 'default'
@property
def log(self):
if not hasattr(self,'_log'):
self.init_log()
return self._log
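# A minimal standalone sketch of the lazy-loading property pattern used by the
# manager class above: each property checks for a cached attribute and invokes
# its loader on first access. The class and names below are illustrative only.
class _LazyPropertyExample:
    def load_items(self):
        self._items = ['a', 'b']
    @property
    def items(self):
        if not hasattr(self, '_items'):
            self.load_items()
        return self._items
# _LazyPropertyExample().items -> ['a', 'b']; the loader runs once, after which
# the cached attribute is returned directly.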
|
bot.py
|
# -*- coding: utf-8 -*-
from .core.data import *
from .core.olap import *
from .core.oltp import *
from .core.test import *
from multiprocessing import Process
def operation(market):
"""
todo
:param market:
:return:
"""
try:
logg(spacer, market)
while market in setup()['enabled'].split() and not halt():
t_delta = time()
run_mode = setup()['runMode'].upper()
debug('', market)
debug('[run_mode = {}]'.format(run_mode), market)
if run_mode == 'TEST':
public_api(market, t_delta)
private_api(market, t_delta)
else:
probe(market, broadway(market, rotate(market)))
t_delta = time() - t_delta
delay = int(setup()['dbUpdateMinutes']) * 60
if delay > t_delta:
w = delay - t_delta
debug('', market)
debug(msgg(103, secs2human(w)), market)
wait(seconds=w)
logg('', market)
logg(spacer, market)
    except AttributeError:
        logg(format_exc(), market)
        operation(market)  # restart this market's loop after a transient error
    except Exception:
        logg(format_exc(), market)
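# A hedged sketch of the pacing logic in operation() above: each cycle sleeps
# only for the remainder of the configured interval, so a slow cycle is not
# additionally delayed. The parameter names are illustrative, not config keys.
def _remaining_wait(interval_minutes, cycle_seconds):
    delay = int(interval_minutes) * 60
    return max(0, delay - cycle_seconds)
# _remaining_wait(5, 120) -> 180 (a 2-minute cycle leaves 3 minutes to wait)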
def control(t_wait=3):
"""
Start a new process for each enabled plugin.
:param t_wait:
:return:
"""
try:
logg(msgg(101))
bots = {Process(target=operation, args=(exchange_name,))
for exchange_name in setup()['enabled'].split()}
for b in bots:
b.start()
while not halt():
wait()
for b in bots:
b.join(t_wait)
wait(seconds=t_wait)
for b in bots:
b.terminate()
halt(removing=True)
logg(msgg(102))
logg()
    except Exception:
        logg(format_exc())
# Module-level so operation() can reference it even when bot is imported.
spacer = '$$$$$ $$$$$ $$$$$ $$$$$ $$$$$ $$$$$ $$$$$'
if __name__ == '__main__':
    control()
|
proxy.py
|
'''Proxy Server'''
import os
import socket
import sys
import threading
import time
import ipaddress
BUFFER_SIZE = 1024
MAX_CONNECTION = 25
ALLOWED_ACTIONS = ['GET', 'POST']
CACHE_SIZE = 3
TIMEOUT = 300
PORT = 20100
HOST = ''
BLACK_LIST = []
blocked = []
blocked_ips = []
admins = []
BLACKLIST_FILE = "blacklist.txt"
USERNAME_PASSWORD_FILE = "username_password.txt"
class Proxy:
def __init__(self, port, hostname):
'''Constructor for Proxy Server'''
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('[log] Socket successfully created')
except socket.error as err:
            print('[log] Socket creation error: {}'.format(err))
self.services = []
self.cache = {}
self.NEXT_CACHE = 0
self.key = ['', '', '']
self.cached = {}
self.headers = {}
self.updates = {}
def serverService(self):
'''Server end of the Proxy Server,
that will handle requests from clients'''
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((HOST, PORT))
self.server.listen(MAX_CONNECTION)
while True:
try:
(conn, addr) = self.server.accept()
print('[log] Connection received from host: {}, port: {}'.format(
addr[0], addr[1]))
thread = threading.Thread(
target=self.clientService, args=(conn, addr))
thread.start()
                for service in self.services:
                    if not service['thread'].is_alive():
                        service['connection'].close()
                self.services = [
                    service for service in self.services if service['thread'].is_alive()]
service = {
'thread': thread,
'connection': conn,
'address': addr
}
self.services.append(service)
except Exception as e:
print('[log] Error: {}'.format(str(e)))
print('[log] Shutting down')
for service in self.services:
service['connection'].close()
self.server.close()
sys.exit(1)
self.server.close()
def clientService(self, conn, addr):
'''Client end of the Proxy Server, that will make requests to servers,
get data and send to the client'''
# Constrain requests from within IIIT only
if addr[1] < 20000 or addr[1] > 20099:
conn.send('HTTP/1.1 401 Access denied'.encode('utf-8'))
conn.close()
print('[log] Connection from host: {}, port: {} denied'.format(
addr[0], addr[1]))
return
try:
            # HTTP request; note str() on bytes yields "b'...'", which
            # requestInfo compensates for by slicing off the prefix.
            request = str(conn.recv(BUFFER_SIZE))
(req_type, server, port, filename) = self.requestInfo(request)
# print('Request: {}, Server: {}, Port: {}, File: {}'.format(req_type, server, port, filename))
# Reject requests that are not GET or POST
if req_type not in ALLOWED_ACTIONS:
conn.send('HTTP/1.1 400 Bad request'.encode('utf-8'))
conn.close()
print('[log] {} request from host: {}, port: {} denied'.format(
req_type, addr[0], addr[1]))
return
# Invalid port
if port > 20200 or port < 20101:
conn.send('HTTP/1.1 403 Forbidden'.encode('utf-8'))
conn.close()
print('[log] Request from host: {}, port: {} to host: {}, port: {} denied'.format(
addr[0], addr[1], server, port))
return
# Checking if blacklisted
if self.check_blacklist(server, port):
print('[log] Request from host: {}, port: {} to host: {}, port: {} denied, blacklisted domain'.format(
addr[0], addr[1], server, port))
return
try:
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.connect((server, port))
except Exception as e:
print('[log] Error: {}'.format(str(e)))
conn.send('HTTP/1.1 500 Internal server error'.encode('utf-8'))
conn.close()
return
print('Headers:')
print(self.headers)
# Send request to server
if filename + str(server) + str(port) in self.key:
if 'must-revalidate' in self.headers[filename + str(server) + str(port)]:
print('MUST REVALIDATE')
http_request = 'GET /' + filename + ' HTTP/1.1\r\nIf-Modified-Since: ' + \
self.updates[filename +
str(server) + str(port)] + '\r\n\r\n'
server_sock.send(http_request.encode('utf-8'))
else:
content = self.cached[filename + str(server) + str(port)]
print(content)
print(
'[log] Request serviced from cache, file: {}'.format(filename))
conn.send(str('HTTP/1.1 200 OK\r\n\r\n' +
content).encode('utf-8'))
conn.close()
server_sock.close()
return
else:
http_request = 'GET /' + filename + ' HTTP/1.1\r\n\r\n'
server_sock.send(http_request.encode('utf-8'))
# Get response from server
response = server_sock.recv(BUFFER_SIZE).decode()
print(response)
if filename + str(server) + str(port) not in self.cache:
self.cache[filename + str(server) + str(port)
] = {'time': time.time(), 'count': 0}
elif str(filename + str(server) + str(port)) not in self.key:
if time.time() - self.cache[filename + str(server) + str(port)]['time'] > TIMEOUT:
self.cache[filename + str(server) + str(port)
] = {'time': time.time(), 'count': 0}
else:
self.cache[filename +
str(server) + str(port)]['count'] += 1
# Use cache, or update cache
if response.find('200') != -1:
res = response
if self.cache[filename + str(server) + str(port)]['count'] > 1:
if filename + str(server) + str(port) not in self.key and self.key[self.NEXT_CACHE] != '':
self.cached.pop(self.key[self.NEXT_CACHE])
self.headers.pop(self.key[self.NEXT_CACHE])
self.updates.pop(self.key[self.NEXT_CACHE])
self.key[self.NEXT_CACHE] = filename + \
str(server) + str(port)
self.NEXT_CACHE = (self.NEXT_CACHE + 1) % CACHE_SIZE
# Headers
content = ''
while True:
response = server_sock.recv(BUFFER_SIZE).decode()
print(response)
if len(response) > 0:
content += response
else:
break
print(content)
print('[log] Request serviced by server, file: {}'.format(filename))
if self.cache[filename + str(server) + str(port)]['count'] > 1:
self.cached[filename + str(server) + str(port)] = content
self.headers[filename + str(server) + str(port)] = res
self.updates[filename + str(server) + str(port)] = res.split('\r\n')[
2].split(':', 1)[1].lstrip()
print(self.updates[filename + str(server) + str(port)])
conn.send(str('HTTP/1.1 200 OK\r\n\r\n' +
content).encode('utf-8'))
elif response.find('304') != -1:
content = self.cached[filename + str(server) + str(port)]
print('[log] Request serviced from cache, file: {}'.format(filename))
conn.send(str('HTTP/1.1 200 OK\r\n\r\n' +
content).encode('utf-8'))
elif response.find('404') != -1:
conn.send('HTTP/1.1 404 File not found\r\n\r\n'.encode('utf-8'))
conn.close()
print('[log] Requested file not found')
return
else:
print('[log] Response from server: {}'.format(response))
server_sock.close()
conn.close()
except Exception as e:
print('[log] Error: {}'.format(str(e)))
conn.send(
'Error in connecting to server, try again later\n'.encode('utf-8'))
conn.close()
def check_blacklist(self, requested_server, requested_port):
# if not (details["server_url"] + ":" + str(details["server_port"])) in blocked:
# return False
# if not details["auth_b64"]:
# return True
# if details["auth_b64"] in admins:
# return False
# return True
print("checking")
print(requested_server+str(requested_port))
req = requested_server+":"+str(requested_port)
if req in blocked_ips:
u = input("Enter username")
p = input("Enter password")
print(u)
print(p)
if u == username and p == password:
return False
return True
return False
def requestInfo(self, request):
'''Function to extract server, port and filename requested from HTTP request'''
        req_type = request.split()[0][2:]  # strip the "b'" prefix left by str(bytes)
url = request.split()[1]
# line = request.split('\r\n')[0]
# url = line.split()[1]
no_protocol = url.find('://')
if no_protocol == -1:
host_url = url
else:
host_url = url[(no_protocol + 3):]
port_start = host_url.find(':')
port_end = host_url.find('/')
if port_end == -1:
port_end = len(host_url)
server = ''
port = -1
if port_start == -1 or port_end < port_start:
port = 20101
server = host_url[:port_end]
else:
port = int((host_url[(port_start + 1):])
[:port_end - port_start - 1])
server = host_url[:port_start]
try:
filename = host_url.split('/')[1]
except IndexError:
filename = '/'
return (req_type, server, port, filename)
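# A hedged sketch of the cache bookkeeping in clientService above: entries are
# keyed by filename + server + port, and a fixed-size ring of keys (CACHE_SIZE
# slots advanced by NEXT_CACHE) evicts the oldest cached body in FIFO order.
# These helpers are illustrative and are not used by the Proxy class itself.
def _cache_key(filename, server, port):
    return filename + str(server) + str(port)

def _evict_and_store(key_ring, next_slot, cached, new_key, content, cache_size=CACHE_SIZE):
    old_key = key_ring[next_slot]
    if old_key:
        cached.pop(old_key, None)  # drop the oldest entry to make room
    key_ring[next_slot] = new_key
    cached[new_key] = content
    return (next_slot + 1) % cache_size  # next slot to be overwritten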
# # Generating the blacklist
# f = open('./blacklist.txt', 'r')
# blacklist = f.read()
# entries = blacklist.split('\n')
# while len(blacklist) > 0:
# for entry in entries:
# if len(entry) > 0:
# info = entry.split()
# BLACK_LIST.append((info[0], info[1]))
# blacklist = f.read()
# entries = blacklist.split('\n')
# f.close()
# Load the blacklist. str() on the raw byte chunks yields "b'...'" strings,
# which is why the entries are split on the literal "\\n" and sliced below.
f = open(BLACKLIST_FILE, "rb")
data = ""
while True:
chunk = f.read()
if not len(chunk):
break
data += str(chunk)
f.close()
blocked = data.split("\\n")
blocked[0] = blocked[0][2:]
blocked = blocked[:-1]
for b in blocked:
net4 = ipaddress.ip_network(b.split(":")[0])
port = b.split(":")[1]
for x in net4.hosts():
blocked_ips.append(str(x)+":"+port)
f = open(USERNAME_PASSWORD_FILE, "rb")
data = ""
while True:
chunk = f.read()
if not len(chunk):
break
data += str(chunk)
f.close()
data = data.splitlines()
for d in data:
# admins.append(base64.b64encode(d))
admins.append(d[2:-1].split("\\n")[:-1])
username = admins[0][0]
password = admins[0][1]
# print(blocked)
# print()
# print(admins)
# print()
# # print(blocked_ips)
# print(username)
# print(password)
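# A hedged usage sketch: a client that could talk to this proxy. PORT and
# BUFFER_SIZE come from the constants above; the target host/port in the URL
# are made up. Note that clientService also requires the client's *source*
# port to fall within 20000-20099, so a real client must bind accordingly.
def _example_client():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('127.0.0.1', PORT))
    s.send('GET http://127.0.0.1:20101/index.html HTTP/1.1\r\n\r\n'.encode('utf-8'))
    print(s.recv(BUFFER_SIZE).decode())
    s.close()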
proxy = Proxy(PORT, HOST)
if proxy.server:
proxy.serverService()
|
__init__.py
|
"""
objectstore package, abstraction for storing blobs of data for use in Galaxy.
all providers ensure that data can be accessed on the filesystem for running
tools
"""
import abc
import logging
import os
import random
import shutil
import threading
import time
import yaml
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
force_symlink,
parse_xml,
umask_fix_perms,
)
from galaxy.util.bunch import Bunch
from galaxy.util.path import (
safe_makedirs,
safe_relpath,
)
from galaxy.util.sleeper import Sleeper
NO_SESSION_ERROR_MESSAGE = "Attempted to 'create' object store entity in configuration with no database session present."
log = logging.getLogger(__name__)
class ObjectStore(metaclass=abc.ABCMeta):
"""ObjectStore interface.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
:type obj: StorableObject
:param obj: A Galaxy object with an assigned database ID accessible via
the .id attribute.
:type base_dir: string
:param base_dir: A key in `self.extra_dirs` corresponding to the base
directory in which this object should be created, or `None` to specify
the default directory.
:type dir_only: boolean
:param dir_only: If `True`, check only the path where the file identified
by `obj` should be located, not the dataset itself. This option applies
to `extra_dir` argument as well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where the
dataset identified by `obj` should be located. (e.g.,
000/extra_dir/obj.id). Valid values include 'job_work' (defaulting to
config.jobs_directory =
'$GALAXY_ROOT/database/jobs_directory');
'temp' (defaulting to config.new_file_path =
'$GALAXY_ROOT/database/tmp').
:type extra_dir_at_root: boolean
:param extra_dir_at_root: Applicable only if `extra_dir` is set. If True,
the `extra_dir` argument is placed at root of the created directory
structure rather than at the end (e.g., extra_dir/000/obj.id vs.
000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
dataset rather than the default.
:type obj_dir: boolean
:param obj_dir: Append a subdirectory named with the object's ID (e.g.
000/obj.id)
"""
@abc.abstractmethod
def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Return True if the object identified by `obj` exists, False otherwise."""
raise NotImplementedError()
@abc.abstractmethod
def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Mark the object (`obj`) as existing in the store, but with no content.
This method will create a proper directory structure for
the file if the directory does not already exist.
"""
raise NotImplementedError()
@abc.abstractmethod
def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Test if the object identified by `obj` has content.
If the object does not exist raises `ObjectNotFound`.
"""
raise NotImplementedError()
@abc.abstractmethod
def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return size of the object identified by `obj`.
If the object does not exist, return 0.
"""
raise NotImplementedError()
@abc.abstractmethod
def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Delete the object identified by `obj`.
:type entire_dir: boolean
:param entire_dir: If True, delete the entire directory pointed to by
extra_dir. For safety reasons, this option applies
only for and in conjunction with the extra_dir or
obj_dir options.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Fetch `count` bytes of data offset by `start` bytes using `obj.id`.
If the object does not exist raises `ObjectNotFound`.
:type start: int
:param start: Set the position to start reading the dataset file
:type count: int
:param count: Read at most `count` bytes from the dataset
"""
raise NotImplementedError()
@abc.abstractmethod
def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Get the expected filename with absolute path for object with id `obj.id`.
This can be used to access the contents of the object.
"""
raise NotImplementedError()
@abc.abstractmethod
def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, file_name=None, create=False):
"""
Inform the store that the file associated with `obj.id` has been updated.
If `file_name` is provided, update from that file instead of the
default.
If the object does not exist raises `ObjectNotFound`.
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
updating the dataset identified by `obj`
:type create: boolean
:param create: If True and the default dataset does not exist, create
it first.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the URL for direct access if supported, otherwise return None.
Note: need to be careful to not bypass dataset security with this.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_concrete_store_name(self, obj):
"""Return a display name or title of the objectstore corresponding to obj.
To accommodate nested objectstores, obj is passed in so this metadata can
be returned for the ConcreteObjectStore corresponding to the object.
If the dataset is in a new or discarded state and an object_store_id has not
yet been set, this may return ``None``.
"""
@abc.abstractmethod
def get_concrete_store_description_markdown(self, obj):
"""Return a longer description of how data 'obj' is stored.
To accommodate nested objectstores, obj is passed in so this metadata can
be returned for the ConcreteObjectStore corresponding to the object.
If the dataset is in a new or discarded state and an object_store_id has not
yet been set, this may return ``None``.
"""
@abc.abstractmethod
def get_store_usage_percent(self):
"""Return the percentage indicating how full the store is."""
raise NotImplementedError()
@abc.abstractmethod
def get_store_by(self, obj):
"""Return how object is stored (by 'uuid', 'id', or None if not yet saved).
Certain Galaxy remote data features aren't available if objects are stored by 'id'.
"""
raise NotImplementedError()
class BaseObjectStore(ObjectStore):
def __init__(self, config, config_dict=None, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
if config_dict is None:
config_dict = {}
self.running = True
self.config = config
self.check_old_style = config.object_store_check_old_style
extra_dirs = {}
extra_dirs['job_work'] = config.jobs_directory
extra_dirs['temp'] = config.new_file_path
extra_dirs.update({
e['type']: e['path'] for e in config_dict.get('extra_dirs', [])})
self.extra_dirs = extra_dirs
def shutdown(self):
"""Close any connections for this ObjectStore."""
self.running = False
def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Check if a file corresponding to a dataset is ready to be used.
Return True if so, False otherwise
"""
return True
@classmethod
def parse_xml(clazz, config_xml):
"""Parse an XML description of a configuration for this object store.
Return a configuration dictionary (such as would correspond to the YAML configuration)
for the object store.
"""
raise NotImplementedError()
@classmethod
def from_xml(clazz, config, config_xml, **kwd):
config_dict = clazz.parse_xml(config_xml)
return clazz(config, config_dict, **kwd)
def to_dict(self):
extra_dirs = []
for extra_dir_type, extra_dir_path in self.extra_dirs.items():
extra_dirs.append({"type": extra_dir_type, "path": extra_dir_path})
return {
'config': config_to_dict(self.config),
'extra_dirs': extra_dirs,
'type': self.store_type,
}
def _get_object_id(self, obj):
if hasattr(obj, self.store_by):
obj_id = getattr(obj, self.store_by)
if obj_id is None:
obj.flush()
return obj.id
return obj_id
else:
            # jobs don't have UUIDs, so always use the ID in this case when
            # creating job working directories
            return obj.id
def _invoke(self, delegate, obj=None, **kwargs):
return self.__getattribute__("_" + delegate)(obj=obj, **kwargs)
def exists(self, obj, **kwargs):
return self._invoke('exists', obj, **kwargs)
def create(self, obj, **kwargs):
return self._invoke('create', obj, **kwargs)
def empty(self, obj, **kwargs):
return self._invoke('empty', obj, **kwargs)
def size(self, obj, **kwargs):
return self._invoke('size', obj, **kwargs)
def delete(self, obj, **kwargs):
return self._invoke('delete', obj, **kwargs)
def get_data(self, obj, **kwargs):
return self._invoke('get_data', obj, **kwargs)
def get_filename(self, obj, **kwargs):
return self._invoke('get_filename', obj, **kwargs)
def update_from_file(self, obj, **kwargs):
return self._invoke('update_from_file', obj, **kwargs)
def get_object_url(self, obj, **kwargs):
return self._invoke('get_object_url', obj, **kwargs)
def get_concrete_store_name(self, obj):
return self._invoke('get_concrete_store_name', obj)
def get_concrete_store_description_markdown(self, obj):
return self._invoke('get_concrete_store_description_markdown', obj)
def get_store_usage_percent(self):
return self._invoke('get_store_usage_percent')
def get_store_by(self, obj, **kwargs):
return self._invoke('get_store_by', obj, **kwargs)
class ConcreteObjectStore(BaseObjectStore):
"""Subclass of ObjectStore for stores that don't delegate (non-nested).
Currently only adds store_by functionality. Which doesn't make
sense for the delegating object stores.
"""
def __init__(self, config, config_dict=None, **kwargs):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the following attributes:
* object_store_check_old_style (only used by the
:class:`DiskObjectStore` subclass)
* jobs_directory -- Each job is given a unique empty directory
as its current working directory. This option defines in what
parent directory those directories will be created.
* new_file_path -- Used to set the 'temp' extra_dir.
"""
if config_dict is None:
config_dict = {}
super().__init__(config=config, config_dict=config_dict, **kwargs)
self.store_by = config_dict.get("store_by", None) or getattr(config, "object_store_store_by", "id")
self.name = config_dict.get("name", None)
self.description = config_dict.get("description", None)
def to_dict(self):
rval = super().to_dict()
rval["store_by"] = self.store_by
rval["name"] = self.name
rval["description"] = self.description
return rval
def _get_concrete_store_name(self, obj):
return self.name
def _get_concrete_store_description_markdown(self, obj):
return self.description
def _get_store_by(self, obj):
return self.store_by
class DiskObjectStore(ConcreteObjectStore):
"""
Standard Galaxy object store.
Stores objects in files under a specific directory on disk.
>>> from galaxy.util.bunch import Bunch
>>> import tempfile
>>> file_path=tempfile.mkdtemp()
>>> obj = Bunch(id=1)
>>> s = DiskObjectStore(Bunch(umask=0o077, jobs_directory=file_path, new_file_path=file_path, object_store_check_old_style=False), dict(files_dir=file_path))
>>> s.create(obj)
>>> s.exists(obj)
True
>>> assert s.get_filename(obj) == file_path + '/000/dataset_1.dat'
"""
store_type = 'disk'
def __init__(self, config, config_dict):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`ObjectStore` plus:
* file_path -- Default directory to store objects to disk in.
* umask -- the permission bits for newly created files.
:type file_path: str
:param file_path: Override for the `config.file_path` value.
:type extra_dirs: dict
:param extra_dirs: Keys are string, values are directory paths.
"""
super().__init__(config, config_dict)
self.file_path = os.path.abspath(config_dict.get("files_dir") or config.file_path)
@classmethod
def parse_xml(clazz, config_xml):
extra_dirs = []
config_dict = {}
if config_xml is not None:
store_by = config_xml.attrib.get('store_by', None)
if store_by is not None:
config_dict['store_by'] = store_by
name = config_xml.attrib.get('name', None)
if name is not None:
config_dict['name'] = name
for e in config_xml:
if e.tag == 'files_dir':
config_dict["files_dir"] = e.get('path')
elif e.tag == 'description':
config_dict["description"] = e.text
else:
extra_dirs.append({"type": e.get('type'), "path": e.get('path')})
config_dict["extra_dirs"] = extra_dirs
return config_dict
def to_dict(self):
as_dict = super().to_dict()
as_dict["files_dir"] = self.file_path
return as_dict
def __get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False):
"""
Return the absolute path for the file corresponding to the `obj.id`.
This is regardless of whether or not the file exists.
"""
path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
extra_dir_at_root=extra_dir_at_root, alt_name=alt_name,
obj_dir=False, old_style=True)
        # For backward compatibility: check the old style root path first;
        # otherwise construct the hashed path.
        if not os.path.exists(path):
            return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir,
                                        extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
        return path
# TODO: rename to _disk_path or something like that to avoid conflicts with
# children that'll use the local_extra_dirs decorator, e.g. S3
def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
"""
Construct the absolute path for accessing the object identified by `obj.id`.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
directory in which this object should be created, or
None to specify the default directory.
:type dir_only: boolean
:param dir_only: If True, check only the path where the file
identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected
path used to access the object identified by `obj` (e.g.,
/files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
:param alt_name: Use this name as the alternative name for the returned
dataset rather than the default.
        :type old_style: boolean
        :param old_style: This option is used for backward compatibility. If
            `True` then the composed directory structure does not include a
            hash id (e.g., /files/dataset_10.dat (old) vs.
            /files/000/dataset_10.dat (new))
"""
base = os.path.abspath(self.extra_dirs.get(base_dir, self.file_path))
        # extra_dir should never be constructed from provided data but just
        # make sure there are no shenanigans afoot
if extra_dir and extra_dir != os.path.normpath(extra_dir):
log.warning('extra_dir is not normalized: %s', extra_dir)
raise ObjectInvalid("The requested object is invalid")
# ensure that any parent directory references in alt_name would not
# result in a path not contained in the directory path constructed here
if alt_name and not safe_relpath(alt_name):
log.warning('alt_name would locate path outside dir: %s', alt_name)
raise ObjectInvalid("The requested object is invalid")
obj_id = self._get_object_id(obj)
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
# Construct hashed path
rel_path = os.path.join(*directory_hash_id(obj_id))
# Create a subdirectory for the object ID
if obj_dir:
rel_path = os.path.join(rel_path, str(obj_id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
else:
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
assert obj_id is not None, "The effective dataset identifier consumed by object store [%s] must be set before a path can be constructed." % (self.store_by)
path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj_id)
return os.path.abspath(path)
def _exists(self, obj, **kwargs):
"""Override `ObjectStore`'s stub and check on disk."""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility: check root path first; otherwise
# construct and check hashed path.
if os.path.exists(path):
return True
return os.path.exists(self._construct_path(obj, **kwargs))
def _create(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by creating any files and folders on disk."""
if not self._exists(obj, **kwargs):
path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get('dir_only', False)
# Create directory if it does not exist
dir = path if dir_only else os.path.dirname(path)
safe_makedirs(dir)
# Create the file if it does not exist
if not dir_only:
                open(path, 'w').close()  # create an empty placeholder file
umask_fix_perms(path, self.config.umask, 0o666)
def _empty(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by checking file size on disk."""
return self.size(obj, **kwargs) == 0
def _size(self, obj, **kwargs):
"""Override `ObjectStore`'s stub by return file size on disk.
Returns 0 if the object doesn't exist yet or other error.
"""
if self._exists(obj, **kwargs):
try:
filepath = self._get_filename(obj, **kwargs)
for _ in range(0, 2):
size = os.path.getsize(filepath)
if size != 0:
break
# May be legitimately 0, or there may be an issue with the FS / kernel, so we try again
time.sleep(0.01)
return size
except OSError:
return 0
else:
return 0
def _delete(self, obj, entire_dir=False, **kwargs):
"""Override `ObjectStore`'s stub; delete the file or folder on disk."""
path = self._get_filename(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
obj_dir = kwargs.get('obj_dir', False)
try:
if entire_dir and (extra_dir or obj_dir):
shutil.rmtree(path)
return True
if self._exists(obj, **kwargs):
os.remove(path)
return True
except OSError as ex:
log.critical('{} delete error {}'.format(self.__get_filename(obj, **kwargs), ex))
return False
def _get_data(self, obj, start=0, count=-1, **kwargs):
"""Override `ObjectStore`'s stub; retrieve data directly from disk."""
data_file = open(self._get_filename(obj, **kwargs)) # Should be rb?
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
def _get_filename(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
If `object_store_check_old_style` is set to `True` in config then the
root path is checked first.
"""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise,
# construct and return hashed path
if os.path.exists(path):
return path
path = self._construct_path(obj, **kwargs)
if not os.path.exists(path):
raise ObjectNotFound
return path
def _update_from_file(self, obj, file_name=None, create=False, **kwargs):
"""`create` parameter is not used in this implementation."""
preserve_symlinks = kwargs.pop('preserve_symlinks', False)
# FIXME: symlinks and the object store model may not play well together
# these should be handled better, e.g. registering the symlink'd file
# as an object
if create:
self._create(obj, **kwargs)
if file_name and self._exists(obj, **kwargs):
try:
if preserve_symlinks and os.path.islink(file_name):
force_symlink(os.readlink(file_name), self._get_filename(obj, **kwargs))
else:
path = self._get_filename(obj, **kwargs)
shutil.copy(file_name, path)
umask_fix_perms(path, self.config.umask, 0o666)
except OSError as ex:
log.critical('Error copying {} to {}: {}'.format(file_name, self.__get_filename(obj, **kwargs), ex))
raise ex
def _get_object_url(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
Returns `None`, we have no URLs.
"""
return None
def _get_store_usage_percent(self, **kwargs):
"""Override `ObjectStore`'s stub by return percent storage used."""
st = os.statvfs(self.file_path)
return (float(st.f_blocks - st.f_bavail) / st.f_blocks) * 100
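# A hedged sketch of the on-disk layout DiskObjectStore produces: the hashed
# relative path comes from directory_hash_id(obj_id) and the filename defaults
# to dataset_<id>.dat, mirroring _construct_path above (the doctest shows id 1
# mapping to '000'). This helper is illustrative only.
def _sketch_disk_path(files_dir, obj_id):
    rel_path = os.path.join(*directory_hash_id(obj_id))
    return os.path.join(files_dir, rel_path, "dataset_%s.dat" % obj_id)
# _sketch_disk_path('/data', 1) -> '/data/000/dataset_1.dat'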
class NestedObjectStore(BaseObjectStore):
"""
Base for ObjectStores that use other ObjectStores.
Example: DistributedObjectStore, HierarchicalObjectStore
"""
def __init__(self, config, config_xml=None):
"""Extend `ObjectStore`'s constructor."""
super().__init__(config)
self.backends = {}
def shutdown(self):
"""For each backend, shuts them down."""
for store in self.backends.values():
store.shutdown()
super().shutdown()
def _exists(self, obj, **kwargs):
"""Determine if the `obj` exists in any of the backends."""
return self._call_method('_exists', obj, False, False, **kwargs)
def file_ready(self, obj, **kwargs):
"""Determine if the file for `obj` is ready to be used by any of the backends."""
return self._call_method('file_ready', obj, False, False, **kwargs)
def _create(self, obj, **kwargs):
"""Create a backing file in a random backend."""
random.choice(list(self.backends.values())).create(obj, **kwargs)
def _empty(self, obj, **kwargs):
"""For the first backend that has this `obj`, determine if it is empty."""
return self._call_method('_empty', obj, True, False, **kwargs)
def _size(self, obj, **kwargs):
"""For the first backend that has this `obj`, return its size."""
return self._call_method('_size', obj, 0, False, **kwargs)
def _delete(self, obj, **kwargs):
"""For the first backend that has this `obj`, delete it."""
return self._call_method('_delete', obj, False, False, **kwargs)
def _get_data(self, obj, **kwargs):
"""For the first backend that has this `obj`, get data from it."""
return self._call_method('_get_data', obj, ObjectNotFound, True, **kwargs)
def _get_filename(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its filename."""
return self._call_method('_get_filename', obj, ObjectNotFound, True, **kwargs)
def _update_from_file(self, obj, **kwargs):
"""For the first backend that has this `obj`, update it from the given file."""
if kwargs.get('create', False):
self._create(obj, **kwargs)
kwargs['create'] = False
return self._call_method('_update_from_file', obj, ObjectNotFound, True, **kwargs)
def _get_object_url(self, obj, **kwargs):
"""For the first backend that has this `obj`, get its URL."""
return self._call_method('_get_object_url', obj, None, False, **kwargs)
def _get_concrete_store_name(self, obj):
return self._call_method('_get_concrete_store_name', obj, None, False)
def _get_concrete_store_description_markdown(self, obj):
return self._call_method('_get_concrete_store_description_markdown', obj, None, False)
def _get_store_by(self, obj):
return self._call_method('_get_store_by', obj, None, False)
def _repr_object_for_exception(self, obj):
try:
# there are a few objects in python that don't have __class__
obj_id = self._get_object_id(obj)
return f'{obj.__class__.__name__}({self.store_by}={obj_id})'
except AttributeError:
return str(obj)
def _call_method(self, method, obj, default, default_is_exception,
**kwargs):
"""Check all children object stores for the first one with the dataset."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return store.__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
class DistributedObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects they are created in a store selected randomly, but
with weighting.
"""
store_type = 'distributed'
def __init__(self, config, config_dict, fsmon=False):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`NestedObjectStore` plus:
* distributed_object_store_config_file
:type config_xml: ElementTree
:type fsmon: bool
:param fsmon: If True, monitor the file system for free space,
removing backends when they get too full.
"""
super().__init__(config, config_dict)
self.backends = {}
self.weighted_backend_ids = []
self.original_weighted_backend_ids = []
self.max_percent_full = {}
self.global_max_percent_full = config_dict.get("global_max_percent_full", 0)
random.seed()
for backend_def in config_dict["backends"]:
backened_id = backend_def["id"]
maxpctfull = backend_def.get("max_percent_full", 0)
weight = backend_def["weight"]
backend = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends[backened_id] = backend
self.max_percent_full[backened_id] = maxpctfull
for _ in range(0, weight):
# The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
self.weighted_backend_ids.append(backened_id)
self.original_weighted_backend_ids = self.weighted_backend_ids
self.sleeper = None
if fsmon and (self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0]):
self.sleeper = Sleeper()
self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            self.filesystem_monitor_thread.daemon = True
self.filesystem_monitor_thread.start()
log.info("Filesystem space monitor started")
@classmethod
def parse_xml(clazz, config_xml, legacy=False):
if legacy:
backends_root = config_xml
else:
backends_root = config_xml.find('backends')
backends = []
config_dict = {
'global_max_percent_full': float(backends_root.get('maxpctfull', 0)),
'backends': backends,
}
for b in [e for e in backends_root if e.tag == 'backend']:
store_id = b.get("id")
store_weight = int(b.get("weight", 1))
store_maxpctfull = float(b.get('maxpctfull', 0))
store_type = b.get("type", "disk")
store_by = b.get('store_by', None)
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(b)
backend_config_dict["id"] = store_id
backend_config_dict["weight"] = store_weight
backend_config_dict["max_percent_full"] = store_maxpctfull
backend_config_dict["type"] = store_type
if store_by is not None:
backend_config_dict["store_by"] = store_by
backends.append(backend_config_dict)
return config_dict
@classmethod
def from_xml(clazz, config, config_xml, fsmon=False):
legacy = False
if config_xml is None:
distributed_config = config.distributed_object_store_config_file
            assert distributed_config is not None, \
                "distributed object store ('object_store = distributed') " \
                "requires a config file, please set one in " \
                "'distributed_object_store_config_file'"
log.debug('Loading backends for distributed object store from %s', distributed_config)
config_xml = parse_xml(distributed_config).getroot()
legacy = True
else:
log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
config_dict = clazz.parse_xml(config_xml, legacy=legacy)
return clazz(config, config_dict, fsmon=fsmon)
def to_dict(self):
as_dict = super().to_dict()
as_dict["global_max_percent_full"] = self.global_max_percent_full
backends = []
for backend_id, backend in self.backends.items():
backend_as_dict = backend.to_dict()
backend_as_dict["id"] = backend_id
backend_as_dict["max_percent_full"] = self.max_percent_full[backend_id]
backend_as_dict["weight"] = len([i for i in self.original_weighted_backend_ids if i == backend_id])
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def shutdown(self):
"""Shut down. Kill the free space monitor if there is one."""
super().shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
self.weighted_backend_ids = new_weighted_backend_ids
self.sleeper.sleep(120) # Test free space every 2 minutes
def _create(self, obj, **kwargs):
"""The only method in which obj.object_store_id may be None."""
if obj.object_store_id is None or not self._exists(obj, **kwargs):
if obj.object_store_id is None or obj.object_store_id not in self.backends:
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
raise ObjectInvalid('objectstore.create, could not generate '
'obj.object_store_id: %s, kwargs: %s'
% (str(obj), str(kwargs)))
log.debug("Selected backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
log.debug("Using preferred backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
self.backends[obj.object_store_id].create(obj, **kwargs)
def _call_method(self, method, obj, default, default_is_exception, **kwargs):
object_store_id = self.__get_store_id_for(obj, **kwargs)
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
def __get_store_id_for(self, obj, **kwargs):
if obj.object_store_id is not None:
if obj.object_store_id in self.backends:
return obj.object_store_id
else:
log.warning('The backend object store ID (%s) for %s object with ID %s is invalid'
% (obj.object_store_id, obj.__class__.__name__, obj.id))
# if this instance has been switched from a non-distributed to a
# distributed object store, or if the object's store id is invalid,
# try to locate the object
for id, store in self.backends.items():
if store.exists(obj, **kwargs):
log.warning('%s object with ID %s found in backend object store with ID %s'
% (obj.__class__.__name__, obj.id, id))
obj.object_store_id = id
return id
return None
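# A hedged sketch of the weighting scheme used by DistributedObjectStore: each
# backend id is repeated 'weight' times in a list, so random.choice() picks a
# backend with probability proportional to its weight. Illustrative only.
def _sketch_weighted_ids(backend_weights):
    weighted = []
    for backend_id, weight in backend_weights.items():
        weighted.extend([backend_id] * weight)
    return weighted
# random.choice(_sketch_weighted_ids({'fast': 3, 'slow': 1})) returns 'fast'
# roughly 75% of the time.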
class HierarchicalObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects only the first store is used.
"""
store_type = 'hierarchical'
def __init__(self, config, config_dict, fsmon=False):
"""The default constructor. Extends `NestedObjectStore`."""
super().__init__(config, config_dict)
backends = {}
for order, backend_def in enumerate(config_dict["backends"]):
backends[order] = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)
self.backends = backends
@classmethod
def parse_xml(clazz, config_xml):
backends_list = []
for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))):
store_type = b.get("type")
objectstore_class, _ = type_to_object_store_class(store_type)
backend_config_dict = objectstore_class.parse_xml(b)
backend_config_dict["type"] = store_type
backends_list.append(backend_config_dict)
return {"backends": backends_list}
def to_dict(self):
as_dict = super().to_dict()
backends = []
for backend in self.backends.values():
backend_as_dict = backend.to_dict()
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def _exists(self, obj, **kwargs):
"""Check all child object stores."""
for store in self.backends.values():
if store.exists(obj, **kwargs):
return True
return False
def _create(self, obj, **kwargs):
"""Call the primary object store."""
self.backends[0].create(obj, **kwargs)
def type_to_object_store_class(store, fsmon=False):
objectstore_class = None
objectstore_constructor_kwds = {}
if store == 'disk':
objectstore_class = DiskObjectStore
elif store == 's3':
from .s3 import S3ObjectStore
objectstore_class = S3ObjectStore
elif store == 'cloud':
from .cloud import Cloud
objectstore_class = Cloud
elif store == 'swift':
from .s3 import SwiftObjectStore
objectstore_class = SwiftObjectStore
elif store == 'distributed':
objectstore_class = DistributedObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == 'hierarchical':
objectstore_class = HierarchicalObjectStore
objectstore_constructor_kwds["fsmon"] = fsmon
elif store == 'irods':
from .irods import IRODSObjectStore
objectstore_class = IRODSObjectStore
elif store == 'azure_blob':
from .azure_blob import AzureBlobObjectStore
objectstore_class = AzureBlobObjectStore
elif store == 'pithos':
from .pithos import PithosObjectStore
objectstore_class = PithosObjectStore
# Disable the Pulsar object store for now until it receives some attention
# elif store == 'pulsar':
# from .pulsar import PulsarObjectStore
# return PulsarObjectStore(config=config, config_xml=config_xml)
return objectstore_class, objectstore_constructor_kwds
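# A brief sketch of how the factory below consumes this mapping: resolve the
# class and extra constructor kwargs from the store type string, then build.
# _resolve('disk') -> (DiskObjectStore, {})
# _resolve('distributed', fsmon=True) -> (DistributedObjectStore, {'fsmon': True})
def _resolve(store_type, fsmon=False):
    return type_to_object_store_class(store_type, fsmon=fsmon)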
def build_object_store_from_config(config, fsmon=False, config_xml=None, config_dict=None):
"""
Invoke the appropriate object store.
Will use the `object_store_config_file` attribute of the `config` object to
configure a new object store from the specified XML file.
Or you can specify the object store type in the `object_store` attribute of
the `config` object. Currently 'disk', 's3', 'swift', 'distributed',
'hierarchical', 'irods', and 'pulsar' are supported values.
"""
from_object = 'xml'
if config is None and config_dict is not None and 'config' in config_dict:
# Build a config object from to_dict of an ObjectStore.
config = Bunch(**config_dict["config"])
elif config is None:
raise Exception("build_object_store_from_config sent None as config parameter and one cannot be recovered from config_dict")
if config_xml is None and config_dict is None:
config_file = config.object_store_config_file
if os.path.exists(config_file):
if config_file.endswith(".xml") or config_file.endswith(".xml.sample"):
# This is a top level invocation of build_object_store_from_config, and
# we have an object_store_conf.xml -- read the .xml and build
# accordingly
config_xml = parse_xml(config.object_store_config_file).getroot()
store = config_xml.get('type')
else:
with open(config_file) as f:
config_dict = yaml.safe_load(f)
from_object = 'dict'
store = config_dict.get('type')
else:
store = config.object_store
elif config_xml is not None:
store = config_xml.get('type')
elif config_dict is not None:
from_object = 'dict'
store = config_dict.get('type')
objectstore_class, objectstore_constructor_kwds = type_to_object_store_class(store, fsmon=fsmon)
    if objectstore_class is None:
        log.error(f"Unrecognized object store definition: {store}")
        raise Exception(f"Unrecognized object store definition: {store}")
if from_object == 'xml':
return objectstore_class.from_xml(config=config, config_xml=config_xml, **objectstore_constructor_kwds)
else:
return objectstore_class(config=config, config_dict=config_dict, **objectstore_constructor_kwds)
def local_extra_dirs(func):
"""Non-local plugin decorator using local directories for the extra_dirs (job_work and temp)."""
def wraps(self, *args, **kwargs):
if kwargs.get('base_dir', None) is None:
return func(self, *args, **kwargs)
else:
for c in self.__class__.__mro__:
if c.__name__ == 'DiskObjectStore':
return getattr(c, func.__name__)(self, *args, **kwargs)
raise Exception("Could not call DiskObjectStore's %s method, does your "
"Object Store plugin inherit from DiskObjectStore?"
% func.__name__)
return wraps
def convert_bytes(bytes):
"""A helper function used for pretty printing disk usage."""
if bytes is None:
bytes = 0
bytes = float(bytes)
if bytes >= 1099511627776:
terabytes = bytes / 1099511627776
size = '%.2fTB' % terabytes
elif bytes >= 1073741824:
gigabytes = bytes / 1073741824
size = '%.2fGB' % gigabytes
elif bytes >= 1048576:
megabytes = bytes / 1048576
size = '%.2fMB' % megabytes
elif bytes >= 1024:
kilobytes = bytes / 1024
size = '%.2fKB' % kilobytes
else:
size = '%.2fb' % bytes
return size
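# A small self-check illustrating convert_bytes output; the thresholds above
# are binary multiples, so 1KB == 1024 bytes here.
def _demo_convert_bytes():
    for value, expected in [(512, '512.00b'), (2048, '2.00KB'),
                            (1048576, '1.00MB'), (1073741824, '1.00GB')]:
        assert convert_bytes(value) == expected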
def config_to_dict(config):
"""Dict-ify the portion of a config object consumed by the ObjectStore class and its subclasses.
"""
return {
'object_store_check_old_style': config.object_store_check_old_style,
'file_path': config.file_path,
'umask': config.umask,
'jobs_directory': config.jobs_directory,
'new_file_path': config.new_file_path,
'object_store_cache_path': config.object_store_cache_path,
'gid': config.gid,
}
class ObjectStorePopulator:
""" Small helper for interacting with the object store and making sure all
datasets from a job end up with the same object_store_id.
"""
def __init__(self, app, user):
self.object_store = app.object_store
self.object_store_id = None
self.user = user
def set_object_store_id(self, data):
# Create an empty file immediately. The first dataset will be
# created in the "default" store, all others will be created in
# the same store as the first.
data.dataset.object_store_id = self.object_store_id
try:
self.object_store.create(data.dataset)
except ObjectInvalid:
raise Exception('Unable to create output dataset: object store is full')
self.object_store_id = data.dataset.object_store_id # these will be the same thing after the first output
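# A hedged usage sketch for ObjectStorePopulator: call set_object_store_id once
# per output dataset of a job so every dataset lands in the same backend. The
# 'app', 'user', and dataset arguments are hypothetical stand-ins for Galaxy
# internals.
def _sketch_populate_outputs(app, user, output_datasets):
    populator = ObjectStorePopulator(app, user)
    for data in output_datasets:
        # The first call picks a backend; later calls reuse populator.object_store_id.
        populator.set_object_store_id(data)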
|
device.py
|
"""
Code for interacting with OpenVizsla devices.
"""
import time
import threading
from enum import Enum
from . import OVCaptureUSBSpeed, find_openvizsla_asset
from .ftdi import FTDIDevice
from .memory import OVMemoryWindow, USB334xMemoryWindow
from .sniffer import USBSniffer
from .firmware import OVFirmwarePackage
from .protocol import OVPacketDispatcher, LFSRTest, DummyHandler
from .io import IOConnection, SDRAMHandler
from .libov import FPGA_GetConfigStatus, HW_Init
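# A hedged usage sketch for the OVDevice class defined below: open the device,
# register a sink for decoded USB events, and run a time-bounded capture. The
# helper name and the 10-second default are illustrative, not part of the API.
def _example_ov_capture(event_sink, speed, max_seconds=10.0):
    device = OVDevice()
    device.open()
    try:
        device.register_sink(event_sink)
        device.run_capture(speed, halt_callback=lambda elapsed: elapsed >= max_seconds)
    finally:
        device.ensure_capture_stopped()
        device.close()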
class OVDevice(OVPacketDispatcher):
""" Class representing an OpenVizsla device. """
# Define the size of the OV device's on-board SDRAM capture buffer.
RAM_SIZE_MIB = 16
RAM_SIZE_BYTES = RAM_SIZE_MIB * 1024 * 1024
# Constants for working with the ULPI UCFG register.
UCFG_REGISTER_ACCESS_ACTIVE = 0x80
UCFG_REGISTER_ADDRESS_MASK = 0x3F
# Magic number for the USB334X.
USB334X_DEVICE_ID = 0x4240009
# Default firmware package name.
DEFAULT_FIRMWARE_PACKAGE_NAME = 'ov3.fwpkg'
def __init__(self, firmware_package=None, verbose=False):
""" Set up -- but do not open -- a connection to an OpenVizsla device. """
# Set up the OV device to handle packets.
super().__init__(verbose=verbose)
# If we weren't handed a firmware package, look for the default.
if firmware_package is None:
package_file = find_openvizsla_asset(self.DEFAULT_FIRMWARE_PACKAGE_NAME)
firmware_package = OVFirmwarePackage(package_file)
self.verbose = verbose
self.firmware = firmware_package
# Default to being unopened, and assume an unprogrammed FPGA.
self._is_open = False
self._fpga_loaded = False
# Create the FTDI connection to our OV device.
self.ftdi = FTDIDevice()
# Set up "memory windows" that allow us to access the OV device's I/O and
mmio_map = firmware_package.get_register_map()
self.regs = OVMemoryWindow(mmio_map, self.read_io_byte, self.write_io_byte)
self.ulpi_regs = USB334xMemoryWindow(self.read_ulpi_register, self.write_ulpi_register)
# Start off with an unvalidated ULPI clock.
self.ulpi_clock_validated = False
# Build our local packet handlers.
self._set_up_io_handlers()
def _set_up_io_handlers(self):
""" Registers the standard packet handler for communicating with OpenVizsla. """
# Build a simple subordinate write function that's closed over the current device,
# and which knows how to send data.
def send(packet):
self.send_packet(packet)
# Create our I/O connection and our USB sniffer handlers.
self.io = IOConnection(send, self.regs)
self.sniffer = USBSniffer(send)
# Create our SDRam read handler, and register our sniffer with it, so stored USB
# packets can be forwarded to the USB sniffer.
sdram_handler = SDRAMHandler(send)
sdram_handler.register_packet_handler(self.sniffer)
# Register our core packet handlers to handle received packets.
self.register_packet_handler(self.io)
self.register_packet_handler(LFSRTest(send))
self.register_packet_handler(self.sniffer)
self.register_packet_handler(sdram_handler)
self.register_packet_handler(DummyHandler(send))
def send_packet(self, raw_packet):
""" Sends a packet over our FTDI backend. """
if self.verbose:
print("< %s" % " ".join("%02x" % i for i in raw_packet))
# Send the data to the device.
self.ftdi.write(self.ftdi.INTERFACE_A, raw_packet, async_=False)
def __comms_thread_body(self):
""" Internal function that executes as our comms thread. """
# Define a callback that will handle receipt of data.
def comms_callback(received, prog):
""" Asynchronous callback issued when the FTDI device receives data. """
try:
self.handle_incoming_bytes(received)
return int(self.__comm_term)
except Exception as e:
self.__comm_term = True
self.__comm_exc = e
return 1
        # Repeatedly try to read from the FTDI device, and handle its results.
        # FIXME: replace the termination flag with a threading.Event.
while not self.__comm_term:
self.ftdi.read_async(self.ftdi.INTERFACE_A, comms_callback, 8, 16)
# If a failure occurred in parsing, raise it out of our asynchronous context.
# TODO: exception should be locked
if self.__comm_exc:
raise self.__comm_exc
    def _start_comms_thread(self):
        """ Start the background thread that handles our core communication. """
        self.__comm_term = False
        self.__comm_exc = None
        self.commthread = threading.Thread(target=self.__comms_thread_body, daemon=True)
        self.commthread.start()
def open(self, reconfigure_fpga=False):
""" Opens a new connection to the OV device, and prepares it for use.
Args:
            reconfigure_fpga -- If true, the FPGA will be configured even if it's already been programmed.
"""
if self._is_open:
raise ValueError("OVDevice doubly opened")
# Open our connection to our FTDI device.
rc = self.ftdi.open()
if rc:
error = IOError("couldn't open connection to our USB device!")
error.errno = rc
raise error
# Configure the FPGA, if necessary.
self.configure_fpga(self.firmware.get_bitstream_file(), not reconfigure_fpga)
# Start our background thread for comms.
self._start_comms_thread()
# Apply our default LED values.
self._apply_default_leds()
# Finally, mark ourselves as open.
self._is_open = True
def _apply_default_leds(self):
""" Sets up the default OV led controls for capture. """
# LEDs off
self.regs.LEDS_MUX_2 = 0
self.regs.LEDS_OUT = 0
# LEDS 0/1 to FTDI TX/RX
self.regs.LEDS_MUX_0 = 2
self.regs.LEDS_MUX_1 = 2
def close(self):
""" Terminates our connection to the OV device. """
# If the device has already been closed, we have nothing to do!
if not self._is_open:
return
self.__comm_term = True
self.commthread.join()
self.ftdi.close()
self._is_open = False
def __del__(self):
""" Finalizer that well attempt to close the device nicely, if it wasn't already. """
if self._is_open:
self.close()
def _stop_capture_to_ram(self):
""" Requests that the OV device stop capturing data from USB to its onboard SDRAM. """
self.regs.SDRAM_SINK_GO = 0
def _stop_streaming_ram_to_host(self):
""" Requests that the OV device stop streaming capture data from its SDRAM to the host. """
self.regs.SDRAM_HOST_READ_GO = 0
self.regs.CSTREAM_CFG = 0
def _device_stop_capture(self):
""" Requests that the device stop all aspects of capture. """
# TODO: we may want to provide an option to flush the SDRam buffer here before capture stops?
self._stop_capture_to_ram()
self._stop_streaming_ram_to_host()
def _initialize_sdram_ringbuffer(self, ringbuffer_size=None, ringbuffer_base=0):
""" Initialize the ringbuffer, and """
# If no ringbuffer size is provided, use the full size of the SDRAM.
if ringbuffer_size is None:
ringbuffer_size = self.RAM_SIZE_BYTES
# Figure out the extents of the ringbuffer in RAM.
ringbuffer_end = ringbuffer_base + ringbuffer_size
# Ensure the SDRAM isn't being used as either a source _or_ sink,
# by ensuring it's neither capturing USB data to the SDRAM nor
# streaming SDRAM contents to the host.
self._stop_capture_to_ram()
self._stop_streaming_ram_to_host()
        # Reset the ringbuffer extents on both the capture (sink) and host-read sides.
self.regs.SDRAM_SINK_RING_BASE = ringbuffer_base
self.regs.SDRAM_SINK_RING_END = ringbuffer_end
self.regs.SDRAM_HOST_READ_RING_BASE = ringbuffer_base
self.regs.SDRAM_HOST_READ_RING_END = ringbuffer_end
def _start_capture_to_ram(self):
""" Instruct the OV device to begin capturing USB data to the on-board SDRam. """
self.regs.SDRAM_SINK_GO = 1
def _start_streaming_ram_to_host(self):
""" Instruct the OV device to begin streaming its captured data to the host. """
self.regs.SDRAM_HOST_READ_GO = 1
self.regs.CSTREAM_CFG = 1
def _device_start_capture(self):
""" Requests that the device start a USB capture on its side. """
        # TODO: we may want to provide an option to clear the SDRAM buffer here before capture starts?
self._start_capture_to_ram()
self._start_streaming_ram_to_host()
def _initialize_performance_counters(self):
""" Reset the device's on-board performance counters. """
self.regs.OVF_INSERT_CTL = 1
self.regs.OVF_INSERT_CTL = 0
def _set_up_phy_for_capture(self, usb_speed):
""" Set up the PHY for a USB capture.
Args:
usb_speed -- The USB speed the communication is known to be operating at.
"""
# Set up our ULPI PHY's core functionality: set it powered on, in non-driving mode
# (so we can snoop), and set the relevant speed.
self.ulpi_regs.FUNC_CTL = \
int(usb_speed) | self.ulpi_regs.FuncCTLFlags.OPERATING_MODE_NON_DRIVING \
| self.ulpi_regs.FuncCTLFlags.PHY_POWERED
def register_sink(self, event_sink):
""" Registers a USBEventSink to receive any USB events.
Args:
event_sink -- The sniffer.USBEventSink object to receive any USB events that occur.
"""
self.sniffer.register_sink(event_sink)
    def run_capture(self, usb_speed, statistics_callback=None, statistics_period=0.1, halt_callback=lambda _: False):
        """ Runs a USB capture from an OpenVizsla device.
        Args:
            usb_speed -- The USB speed the communication is believed to be operating at.
                In the future, this should hopefully be somewhat auto-detectable, and this
                argument will be optional; but for now it must be provided.
            statistics_callback -- Optional callable invoked as (device, elapsed_time)
                once per statistics period.
            statistics_period -- How often to invoke the statistics callback, in seconds.
            halt_callback -- Callable receiving the elapsed time in seconds; the capture
                stops once it returns a truthy value.
        """
# Set up the device for capture.
self._initialize_sdram_ringbuffer()
self._set_up_phy_for_capture(usb_speed)
# Start a capture on the device.
self._device_start_capture()
elapsed_time = 0.0
try:
# Continue until the user-supplied halt condition is met.
while not halt_callback(elapsed_time):
# If we have a statistics callback, call it.
if callable(statistics_callback):
statistics_callback(self, elapsed_time)
# Wait for the next statistics-interval to occur.
time.sleep(statistics_period)
elapsed_time = elapsed_time + statistics_period
finally:
self._device_stop_capture()
def ensure_capture_stopped(self):
""" Ensure that any running USBcapture has been cleanly terminated. """
self._device_stop_capture()
def fpga_configured(self, use_cached=False):
""" Returns true iff we know the current FPGA is programmed. """
assert self._is_open
if use_cached:
return self._fpga_loaded
else:
self._fpga_loaded = (FPGA_GetConfigStatus(self.ftdi) == 0)
return self._fpga_loaded
def configure_fpga(self, bitstream, skip_if_configured=False):
""" Programs the provided bitstream into the device's FPGA. """
fpga_configured = (FPGA_GetConfigStatus(self.ftdi) == 0)
# If the FPGA is already configured, and we're allowed to skip configuration, skip it!
if skip_if_configured and fpga_configured:
self._use_existing_configuration()
return
# If the bitstream is a file-like object, use it.
if not isinstance(bitstream, bytes) and hasattr(bitstream, 'read'):
# FIXME: Current bit_file code is heavily dependent on fstream ops
# and isn't nice to call with a python file-like object
#
# Workaround this by emitting a tempfile
import tempfile
import os
bitfile = tempfile.NamedTemporaryFile(delete=False)
try:
bitfile.write(bitstream.read())
bitfile.close()
HW_Init(self.ftdi, bitfile.name.encode('ascii'))
self._fpga_loaded = True
finally:
# Make sure we cleanup the tempfile
os.unlink(bitfile.name)
        # Otherwise, if we have a set of raw bytes, upload that.
elif isinstance(bitstream, bytes):
HW_Init(self.ftdi, bitstream)
self._fpga_loaded = True
else:
raise TypeError("bitstream must be bytes or file-like")
def _use_existing_configuration(self):
""" Attempts to initialize our hardware using the FPGA's existing configuration. """
HW_Init(self.ftdi, None)
def ulpi_clock_is_up(self):
""" Returns true iff the FPGA reports the ULPI as being up. """
if self.ulpi_clock_validated:
return True
self.ulpi_clock_validated = bool(self.regs.ucfg_stat & 0x1)
return self.ulpi_clock_validated
def read_ulpi_register(self, addr):
""" Reads the value of a ULPI register, by address. You likely want to touch the ulpi_regs view instead. """
assert self.ulpi_clock_is_up()
self.regs.ucfg_rcmd = self.UCFG_REGISTER_ACCESS_ACTIVE | (addr & self.UCFG_REGISTER_ADDRESS_MASK)
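        # Busy-wait until the hardware clears the ACTIVE bit, signalling
        # that the ULPI register read has completed.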
while self.regs.ucfg_rcmd & self.UCFG_REGISTER_ACCESS_ACTIVE:
pass
return self.regs.ucfg_rdata
def write_ulpi_register(self, address, value):
""" Writes the value of a ULPI register, by address. You likely want to touch the .ulpi_regs attribute instead. """
assert self.ulpi_clock_is_up()
self.regs.ucfg_wdata = value
self.regs.ucfg_wcmd = self.UCFG_REGISTER_ACCESS_ACTIVE | (address & self.UCFG_REGISTER_ADDRESS_MASK)
while self.regs.ucfg_wcmd & self.UCFG_REGISTER_ACCESS_ACTIVE:
pass
def read_io_byte(self, address):
""" Reads a byte from the I/O address space, by address. You likely want to touch the .regs attribute instead"""
return self.io.read(self.regs.resolve_address(address))
def write_io_byte(self, address, value):
""" Writes a byte to the I/O address space, by address. You likely want to touch the .regs attribute instead"""
return self.io.write(self.regs.resolve_address(address), value)
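# Illustrative usage sketch (not part of the driver; `my_event_sink` and the
# `usb_speed` constant are assumptions based on the API above):
#
#   device = OVDevice()
#   device.open()
#   device.register_sink(my_event_sink)  # a sniffer.USBEventSink
#   try:
#       # Capture for ten seconds, then stop cleanly.
#       device.run_capture(usb_speed, halt_callback=lambda t: t >= 10.0)
#   finally:
#       device.ensure_capture_stopped()
#       device.close()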
|
smooth_reset.py
|
from functools import partial
from threading import Thread
import wx
from spacq.iteration.variables import OutputVariable
from spacq.tool.box import sift
from ..tool.box import MessageDialog
class SmoothResetPanel(wx.Panel):
"""
A panel to change variables smoothly to and from preset values.
"""
def __init__(self, parent, global_store, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Reset.
reset_static_box = wx.StaticBox(self, label='Smooth reset')
reset_box = wx.StaticBoxSizer(reset_static_box, wx.VERTICAL)
panel_box.Add(reset_box, flag=wx.CENTER|wx.ALL, border=10)
### To zero.
self.to_button = wx.Button(self, label='To Zero')
self.Bind(wx.EVT_BUTTON, self.OnResetToZero, self.to_button)
reset_box.Add(self.to_button, flag=wx.EXPAND)
### From zero.
self.from_button = wx.Button(self, label='From Zero')
self.Bind(wx.EVT_BUTTON, self.OnResetFromZero, self.from_button)
reset_box.Add(self.from_button, flag=wx.EXPAND)
## To Value
self.toVal_button = wx.Button(self, label='To Value')
self.Bind(wx.EVT_BUTTON, self.OnResetToValue, self.toVal_button)
reset_box.Add(self.toVal_button, flag=wx.EXPAND)
### Steps.
steps_static_box = wx.StaticBox(self, label='Steps')
steps_box = wx.StaticBoxSizer(steps_static_box, wx.VERTICAL)
reset_box.Add(steps_box, flag=wx.EXPAND)
self.reset_steps_input = wx.SpinCtrl(self, min=1, initial=10)
steps_box.Add(self.reset_steps_input)
self.SetSizer(panel_box)
def choose_variables(self):
"""
Return all the selected variables, ensuring that their resources are valid.
"""
all_vars = sift(self.global_store.variables.values(), OutputVariable)
vars = [var for var in all_vars if var.enabled and var.use_const and var.resource_name]
missing_resources = []
unwritable_resources = []
for var in vars:
try:
if not self.global_store.resources[var.resource_name].writable:
unwritable_resources.append(var.resource_name)
except KeyError:
missing_resources.append(var.resource_name)
if missing_resources:
MessageDialog(self, ', '.join(missing_resources), 'Missing resources').Show()
if unwritable_resources:
MessageDialog(self, ', '.join(unwritable_resources), 'Unwritable resources').Show()
if missing_resources or unwritable_resources:
return None
return vars
def reset(self, sweep_setting):
vars = self.choose_variables()
if vars is None:
return
self.to_button.Disable()
self.from_button.Disable()
self.toVal_button.Disable()
def exception_callback(e):
MessageDialog(self, str(e), 'Error writing to resource').Show()
def sweep_all_vars():
try:
thrs = []
for var in vars:
resource = self.global_store.resources[var.resource_name]
if sweep_setting == 1:
value_from, value_to = 0, var.with_type(var.const)
elif sweep_setting == 0:
value_from, value_to = var.with_type(var.const), 0
else:
                        value_from, value_to = resource.value, var.with_type(var.const)
thr = Thread(target=resource.sweep, args=(value_from, value_to, self.reset_steps_input.Value),
kwargs={'exception_callback': partial(wx.CallAfter, exception_callback)})
thr.daemon = True
thrs.append(thr)
for thr in thrs:
thr.start()
for thr in thrs:
thr.join()
finally:
if self:
wx.CallAfter(self.to_button.Enable)
wx.CallAfter(self.from_button.Enable)
wx.CallAfter(self.toVal_button.Enable)
thr = Thread(target=sweep_all_vars)
thr.daemon = True
thr.start()
def OnResetToZero(self, evt=None):
self.reset(0)
def OnResetFromZero(self, evt=None):
self.reset(1)
def OnResetToValue(self, evt=None):
self.reset(2)
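# Illustrative sketch (assumption: Resource.sweep writes a linear ramp of
# `steps` intermediate values from value_from to value_to, which is what the
# "smooth" in smooth reset refers to):
#
#   def _sweep_values(value_from, value_to, steps):
#       for i in range(1, steps + 1):
#           yield value_from + (value_to - value_from) * i / steps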
|
env.py
|
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import re
import os
import socket
import time
import threading
import operator
import datetime
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.dhcp import get_dhcp_handler
from azurelinuxagent.common.event import add_periodic, WALAEventOperation
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol import get_protocol_util
from azurelinuxagent.common.protocol.wire import INCARNATION_FILE_NAME
from azurelinuxagent.common.utils import fileutil
from azurelinuxagent.common.utils.archive import StateArchiver
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
CACHE_PATTERNS = [
re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE),
re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE),
re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE)
]
MAXIMUM_CACHED_FILES = 50
ARCHIVE_INTERVAL = datetime.timedelta(hours=24)
def get_env_handler():
return EnvHandler()
class EnvHandler(object):
"""
Monitor changes to dhcp and hostname.
If dhcp client process re-start has occurred, reset routes, dhcp with fabric.
Monitor scsi disk.
If new scsi disk found, set timeout
"""
def __init__(self):
self.osutil = get_osutil()
self.dhcp_handler = get_dhcp_handler()
self.protocol_util = get_protocol_util()
self.stopped = True
self.hostname = None
self.dhcp_id = None
self.server_thread = None
self.dhcp_warning_enabled = True
self.last_archive = None
self.archiver = StateArchiver(conf.get_lib_dir())
def run(self):
if not self.stopped:
logger.info("Stop existing env monitor service.")
self.stop()
self.stopped = False
logger.info("Start env monitor service.")
self.dhcp_handler.conf_routes()
self.hostname = self.osutil.get_hostname_record()
self.dhcp_id = self.osutil.get_dhcp_pid()
self.start()
def is_alive(self):
return self.server_thread.is_alive()
def start(self):
self.server_thread = threading.Thread(target=self.monitor)
        self.server_thread.daemon = True
self.server_thread.start()
def monitor(self):
"""
Monitor firewall rules
Monitor dhcp client pid and hostname.
If dhcp client process re-start has occurred, reset routes.
Purge unnecessary files from disk cache.
"""
protocol = self.protocol_util.get_protocol()
while not self.stopped:
self.osutil.remove_rules_files()
if conf.enable_firewall():
success = self.osutil.enable_firewall(
dst_ip=protocol.endpoint,
uid=os.getuid())
add_periodic(
logger.EVERY_HOUR,
AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.Firewall,
is_success=success,
log_event=False)
timeout = conf.get_root_device_scsi_timeout()
if timeout is not None:
self.osutil.set_scsi_disks_timeout(timeout)
if conf.get_monitor_hostname():
self.handle_hostname_update()
self.handle_dhclient_restart()
self.archive_history()
time.sleep(5)
def handle_hostname_update(self):
curr_hostname = socket.gethostname()
if curr_hostname != self.hostname:
logger.info("EnvMonitor: Detected hostname change: {0} -> {1}",
self.hostname,
curr_hostname)
self.osutil.set_hostname(curr_hostname)
self.osutil.publish_hostname(curr_hostname)
self.hostname = curr_hostname
def handle_dhclient_restart(self):
if self.dhcp_id is None:
if self.dhcp_warning_enabled:
logger.warn("Dhcp client is not running. ")
self.dhcp_id = self.osutil.get_dhcp_pid()
# disable subsequent error logging
self.dhcp_warning_enabled = self.dhcp_id is not None
return
# the dhcp process has not changed since the last check
if self.osutil.check_pid_alive(self.dhcp_id.strip()):
return
new_pid = self.osutil.get_dhcp_pid()
if new_pid is not None and new_pid != self.dhcp_id:
logger.info("EnvMonitor: Detected dhcp client restart. "
"Restoring routing table.")
self.dhcp_handler.conf_routes()
self.dhcp_id = new_pid
def archive_history(self):
"""
        Purge history if we have exceeded the maximum count.
Create a .zip of the history that has been preserved.
"""
if self.last_archive is not None \
and datetime.datetime.utcnow() < \
self.last_archive + ARCHIVE_INTERVAL:
return
        self.archiver.purge()
        self.archiver.archive()
        self.last_archive = datetime.datetime.utcnow()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.stopped = True
if self.server_thread is not None:
self.server_thread.join()
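# Minimal usage sketch (the agent daemon normally drives this handler; shown
# here only for illustration):
#
#   handler = get_env_handler()
#   handler.run()   # configures routes, records state, starts monitor thread
#   ...
#   handler.stop()  # sets the stop flag and joins the monitor thread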
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask("")
@app.route("/")
def home():
return "The bot is online now!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
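# Typical usage (hypothetical long-running script, e.g. a bot; an external
# uptime monitor pings the Flask route to keep the host awake):
#
#   from keep_alive import keep_alive
#   keep_alive()    # start the web server in a background thread
#   bot.run(TOKEN)  # then enter the long-running main loop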
|
app.py
|
from flask import Flask, render_template, request
import subprocess
import os
import time
from threading import Thread
import fileinput
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
wifi_ap_array = scan_wifi_networks()
config_hash = config_file_hash()
return render_template('app.html', wifi_ap_array = wifi_ap_array, config_hash = config_hash)
@app.route('/manual_ssid_entry')
def manual_ssid_entry():
return render_template('manual_ssid_entry.html')
@app.route('/wpa_settings')
def wpa_settings():
config_hash = config_file_hash()
return render_template('wpa_settings.html', wpa_enabled = config_hash['wpa_enabled'], wpa_key = config_hash['wpa_key'])
@app.route('/save_credentials', methods = ['GET', 'POST'])
def save_credentials():
ssid = request.form['ssid']
wifi_key = request.form['wifi_key']
create_wpa_supplicant(ssid, wifi_key)
# Call set_ap_client_mode() in a thread otherwise the reboot will prevent
# the response from getting to the browser
def sleep_and_start_ap():
time.sleep(2)
set_ap_client_mode()
t = Thread(target=sleep_and_start_ap)
t.start()
return render_template('save_credentials.html', ssid = ssid)
@app.route('/save_wpa_credentials', methods = ['GET', 'POST'])
def save_wpa_credentials():
config_hash = config_file_hash()
wpa_enabled = request.form.get('wpa_enabled')
wpa_key = request.form['wpa_key']
if str(wpa_enabled) == '1':
update_wpa(1, wpa_key)
else:
update_wpa(0, wpa_key)
def sleep_and_reboot_for_wpa():
time.sleep(2)
os.system('reboot')
t = Thread(target=sleep_and_reboot_for_wpa)
t.start()
config_hash = config_file_hash()
return render_template('save_wpa_credentials.html', wpa_enabled = config_hash['wpa_enabled'], wpa_key = config_hash['wpa_key'])
######## FUNCTIONS ##########
def scan_wifi_networks():
iwlist_raw = subprocess.Popen(['iwlist', 'scan'], stdout=subprocess.PIPE)
ap_list, err = iwlist_raw.communicate()
ap_array = []
for line in ap_list.decode('utf-8').rsplit('\n'):
if 'ESSID' in line:
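            # Slice off iwlist's fixed-width indentation and the 'ESSID:"'
            # prefix, and drop the trailing quote (assumes the standard
            # iwlist output layout).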
ap_ssid = line[27:-1]
if ap_ssid != '':
ap_array.append(ap_ssid)
return ap_array
def create_wpa_supplicant(ssid, wifi_key):
temp_conf_file = open('wpa_supplicant.conf.tmp', 'w')
temp_conf_file.write('ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\n')
temp_conf_file.write('update_config=1\n')
temp_conf_file.write('\n')
temp_conf_file.write('network={\n')
temp_conf_file.write(' ssid="' + ssid + '"\n')
if wifi_key == '':
temp_conf_file.write(' key_mgmt=NONE\n')
else:
temp_conf_file.write(' psk="' + wifi_key + '"\n')
temp_conf_file.write(' }')
    temp_conf_file.close()
os.system('mv wpa_supplicant.conf.tmp /etc/wpa_supplicant/wpa_supplicant.conf')
def set_ap_client_mode():
os.system('rm -f /opt/symlite/wifi/host_mode')
#os.system('rm /etc/cron.raspiwifi/aphost_bootstrapper')
#os.system('cp /usr/lib/raspiwifi/reset_device/static_files/apclient_bootstrapper /etc/cron.raspiwifi/')
#os.system('chmod +x /etc/cron.raspiwifi/apclient_bootstrapper')
os.system('mv /etc/dnsmasq.conf.original /etc/dnsmasq.conf')
os.system('mv /etc/dhcpcd.conf.original /etc/dhcpcd.conf')
os.system('reboot')
def update_wpa(wpa_enabled, wpa_key):
with fileinput.FileInput('/opt/symlite/wifi/config/raspiwifi.conf', inplace=True) as raspiwifi_conf:
for line in raspiwifi_conf:
if 'wpa_enabled=' in line:
line_array = line.split('=')
line_array[1] = wpa_enabled
print(line_array[0] + '=' + str(line_array[1]))
if 'wpa_key=' in line:
line_array = line.split('=')
line_array[1] = wpa_key
print(line_array[0] + '=' + line_array[1])
if 'wpa_enabled=' not in line and 'wpa_key=' not in line:
print(line, end='')
def config_file_hash():
config_file = open('/opt/symlite/wifi/config/raspiwifi.conf')
config_hash = {}
for line in config_file:
line_key = line.split("=")[0]
line_value = line.split("=")[1].rstrip()
config_hash[line_key] = line_value
return config_hash
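# For illustration, given a raspiwifi.conf containing:
#   ssl_enabled=0
#   server_port=9090
#   wpa_enabled=1
#   wpa_key=secret
# config_file_hash() returns:
#   {'ssl_enabled': '0', 'server_port': '9090',
#    'wpa_enabled': '1', 'wpa_key': 'secret'}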
if __name__ == '__main__':
config_hash = config_file_hash()
if config_hash['ssl_enabled'] == "1":
app.run(host = '0.0.0.0', port = int(config_hash['server_port']), ssl_context='adhoc')
else:
app.run(host = '0.0.0.0', port = int(config_hash['server_port']))
|
test_gc.py
|
import unittest
import unittest.mock
from test.support import (verbose, refcount_test, run_unittest,
cpython_only, temp_dir, TESTFN, unlink,
import_module)
from test.support.script_helper import assert_python_ok, make_script
from test.support import threading_helper
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
try:
from _testcapi import ContainerNoGC
except ImportError:
ContainerNoGC = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
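    # Note: since PEP 442 (Python 3.4), cycles whose objects have plain
    # __del__ methods *are* collectable; @with_tp_del emulates the legacy
    # tp_del behaviour described above so this path can still be exercised.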
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
    # Usually, sys.gettotalrefcount() is only present if Python has been
    # compiled in debug mode. If it's missing, expect that Python has
    # been built in release mode: with NDEBUG defined.
BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection; make that happen.
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection; make that happen.
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with threading_helper.start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_is_finalized(self):
        # Objects not tracked by the GC always return false.
self.assertFalse(gc.is_finalized(3))
storage = []
class Lazarus:
def __del__(self):
storage.append(self)
lazarus = Lazarus()
self.assertFalse(gc.is_finalized(lazarus))
del lazarus
gc.collect()
lazarus = storage.pop()
self.assertTrue(gc.is_finalized(lazarus))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout, b"")
return stderr
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_global_del_SystemExit(self):
code = """if 1:
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
gc.freeze()
self.assertGreater(gc.get_freeze_count(), 0)
gc.unfreeze()
self.assertEqual(gc.get_freeze_count(), 0)
def test_get_objects(self):
gc.collect()
l = []
l.append(l)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=0)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=1)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=2)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
del l
gc.collect()
def test_get_objects_arguments(self):
gc.collect()
self.assertEqual(len(gc.get_objects()),
len(gc.get_objects(generation=None)))
self.assertRaises(ValueError, gc.get_objects, 1000)
self.assertRaises(ValueError, gc.get_objects, -1000)
self.assertRaises(TypeError, gc.get_objects, "1")
self.assertRaises(TypeError, gc.get_objects, 1.234)
def test_resurrection_only_happens_once_per_object(self):
class A: # simple self-loop
def __init__(self):
self.me = self
class Lazarus(A):
resurrected = 0
resurrected_instances = []
def __del__(self):
Lazarus.resurrected += 1
Lazarus.resurrected_instances.append(self)
gc.collect()
gc.disable()
# We start with 0 resurrections
laz = Lazarus()
self.assertEqual(Lazarus.resurrected, 0)
# Deleting the instance and triggering a collection
# resurrects the object
del laz
gc.collect()
self.assertEqual(Lazarus.resurrected, 1)
self.assertEqual(len(Lazarus.resurrected_instances), 1)
# Clearing the references and forcing a collection
# should not resurrect the object again.
Lazarus.resurrected_instances.clear()
self.assertEqual(Lazarus.resurrected, 1)
gc.collect()
self.assertEqual(Lazarus.resurrected, 1)
gc.enable()
def test_resurrection_is_transitive(self):
class Cargo:
def __init__(self):
self.me = self
class Lazarus:
resurrected_instances = []
def __del__(self):
Lazarus.resurrected_instances.append(self)
gc.collect()
gc.disable()
laz = Lazarus()
cargo = Cargo()
cargo_id = id(cargo)
# Create a cycle between cargo and laz
laz.cargo = cargo
cargo.laz = laz
# Drop the references, force a collection and check that
# everything was resurrected.
del laz, cargo
gc.collect()
self.assertEqual(len(Lazarus.resurrected_instances), 1)
instance = Lazarus.resurrected_instances.pop()
self.assertTrue(hasattr(instance, "cargo"))
self.assertEqual(id(instance.cargo), cargo_id)
gc.collect()
gc.enable()
def test_resurrection_does_not_block_cleanup_of_other_objects(self):
# When a finalizer resurrects objects, stats were reporting them as
# having been collected. This affected both collect()'s return
# value and the dicts returned by get_stats().
N = 100
class A: # simple self-loop
def __init__(self):
self.me = self
class Z(A): # resurrecting __del__
def __del__(self):
zs.append(self)
zs = []
def getstats():
d = gc.get_stats()[-1]
return d['collected'], d['uncollectable']
gc.collect()
gc.disable()
# No problems if just collecting A() instances.
oldc, oldnc = getstats()
for i in range(N):
A()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N) # instance object & its dict
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# But Z() is not actually collected.
oldc, oldnc = c, nc
Z()
# Nothing is collected - Z() is merely resurrected.
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 0)
self.assertEqual(c - oldc, 0)
self.assertEqual(nc - oldnc, 0)
# Z() should not prevent anything else from being collected.
oldc, oldnc = c, nc
for i in range(N):
A()
Z()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N)
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# The A() trash should have been reclaimed already but the
# 2 copies of Z are still in zs (and the associated dicts).
oldc, oldnc = c, nc
zs.clear()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 4)
self.assertEqual(c - oldc, 4)
self.assertEqual(nc - oldnc, 0)
gc.enable()
@unittest.skipIf(ContainerNoGC is None,
'requires ContainerNoGC extension type')
def test_trash_weakref_clear(self):
# Test that trash weakrefs are properly cleared (bpo-38006).
#
# Structure we are creating:
#
# Z <- Y <- A--+--> WZ -> C
# ^ |
# +--+
# where:
# WZ is a weakref to Z with callback C
# Y doesn't implement tp_traverse
# A contains a reference to itself, Y and WZ
#
# A, Y, Z, WZ are all trash. The GC doesn't know that Z is trash
# because Y does not implement tp_traverse. To show the bug, WZ needs
# to live long enough so that Z is deallocated before it. Then, if
# gcmodule is buggy, when Z is being deallocated, C will run.
#
# To ensure WZ lives long enough, we put it in a second reference
# cycle. That trick only works due to the ordering of the GC prev/next
# linked lists. So, this test is a bit fragile.
#
# The bug reported in bpo-38006 is caused because the GC did not
# clear WZ before starting the process of calling tp_clear on the
# trash. Normally, handle_weakrefs() would find the weakref via Z and
# clear it. However, since the GC cannot find Z, WR is not cleared and
# it can execute during delete_garbage(). That can lead to disaster
# since the callback might tinker with objects that have already had
# tp_clear called on them (leaving them in possibly invalid states).
callback = unittest.mock.Mock()
class A:
__slots__ = ['a', 'y', 'wz']
class Z:
pass
# setup required object graph, as described above
a = A()
a.a = a
a.y = ContainerNoGC(Z())
a.wz = weakref.ref(a.y.value, callback)
# create second cycle to keep WZ alive longer
wr_cycle = [a.wz]
wr_cycle.append(wr_cycle)
# ensure trash unrelated to this test is gone
gc.collect()
gc.disable()
# release references and create trash
del a, wr_cycle
gc.collect()
# if called, it means there is a bug in the GC. The weakref should be
# cleared before Z dies.
callback.assert_not_called()
gc.enable()
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
@unittest.skipIf(BUILD_WITH_NDEBUG,
                     'built with -DNDEBUG')
def test_refcount_errors(self):
self.preclean()
# Verify the "handling" of objects with broken refcounts
# Skip the test if ctypes is not available
import_module("ctypes")
import subprocess
code = textwrap.dedent('''
from test.support import gc_collect, SuppressCrashReport
a = [1, 2, 3]
b = [a]
# Avoid coredump when Py_FatalError() calls abort()
SuppressCrashReport().__enter__()
# Simulate the refcount of "a" being too low (compared to the
# references held on it by live data), but keeping it above zero
# (to avoid deallocating it):
import ctypes
ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
# The garbage collector should now have a fatal error
# when it reaches the broken object
gc_collect()
''')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
# Verify that stderr has a useful error message:
self.assertRegex(stderr,
br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
self.assertRegex(stderr,
br'refcount is too small')
# "address : 0x7fb5062efc18"
# "address : 7FB5062EFC18"
address_regex = br'[0-9a-fA-Fx]+'
self.assertRegex(stderr,
br'object address : ' + address_regex)
self.assertRegex(stderr,
br'object refcount : 1')
self.assertRegex(stderr,
br'object type : ' + address_regex)
self.assertRegex(stderr,
br'object type name: list')
self.assertRegex(stderr,
br'object repr : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
ssd_model.py
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SSD300 Model Configuration.
References:
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
SSD: Single Shot MultiBox Detector
arXiv:1512.02325
Ported from MLPerf reference implementation:
https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import re
import threading
import tensorflow as tf
from cnn_quantization.tf_cnn_benchmarks import constants
from cnn_quantization.tf_cnn_benchmarks import mlperf
from cnn_quantization.tf_cnn_benchmarks import ssd_constants
from cnn_quantization.tf_cnn_benchmarks.cnn_util import log_fn
from cnn_quantization.tf_cnn_benchmarks.models import model as model_lib
from cnn_quantization.tf_cnn_benchmarks.models import resnet_model
BACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'
class SSD300Model(model_lib.CNNModel):
"""Single Shot Multibox Detection (SSD) model for 300x300 image datasets."""
def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,
learning_rate=1e-3, backbone='resnet34', params=None):
super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,
params=params)
# For COCO dataset, 80 categories + 1 background = 81 labels
self.label_num = label_num
# Currently only support ResNet-34 as backbone model
if backbone != 'resnet34':
raise ValueError('Invalid backbone model %s for SSD.' % backbone)
mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)
# Number of channels and default boxes associated with the following layers:
# ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2
self.out_chan = [256, 512, 512, 256, 256, 256]
mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,
value=self.out_chan)
# Number of default boxes from layers of different scales
# 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
self.num_dboxes = [4, 6, 6, 6, 4, 4]
mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,
value=self.num_dboxes)
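    # In total: 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4
    # = 8732 default boxes per image.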
# TODO(haoyuzhang): in order to correctly restore in replicated mode, need
# to create a saver for each tower before graph is finalized. Use variable
# manager for better efficiency.
self.backbone_savers = []
# Collected predictions for eval stage. It maps each image id in eval
# dataset to a dict containing the following information:
# source_id: raw ID of image
# raw_shape: raw shape of image
# pred_box: encoded box coordinates of prediction
# pred_scores: scores of classes in prediction
self.predictions = {}
# Global step when predictions are collected.
self.eval_global_step = 0
# Average precision. In asynchronous eval mode, this is the latest AP we
# get so far and may not be the results at current eval step.
self.eval_coco_ap = 0
# Process, queues, and thread for asynchronous evaluation. When enabled,
    # create a separate process (async_eval_process) that continuously pulls
    # intermediate results from the predictions queue (a multiprocessing queue),
    # processes them, and pushes final results into the results queue (another
    # multiprocessing queue). The main thread is responsible for pushing
    # messages into the predictions queue, and starts a separate thread to
    # continuously pull messages from the results queue to update final results.
# Message in predictions queue should be a tuple of two elements:
# (evaluation step, predictions)
# Message in results queue should be a tuple of two elements:
# (evaluation step, final results)
self.async_eval_process = None
self.async_eval_predictions_queue = None
self.async_eval_results_queue = None
self.async_eval_results_getter_thread = None
# The MLPerf reference uses a starting lr of 1e-3 at bs=32.
self.base_lr_batch_size = 32
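    # Illustrative sketch (assumption: as is common for MLPerf-style training,
    # the learning rate is scaled linearly with the global batch size relative
    # to this reference size):
    #
    #   scaled_lr = base_lr * batch_size / self.base_lr_batch_size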
def skip_final_affine_layer(self):
return True
def add_backbone_model(self, cnn):
# --------------------------------------------------------------------------
# Resnet-34 backbone model -- modified for SSD
# --------------------------------------------------------------------------
# Input 300x300, output 150x150
cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)
cnn.mpool(3, 3, 2, 2, mode='SAME')
resnet34_layers = [3, 4, 6, 3]
version = 'v1'
# ResNet-34 block group 1
# Input 150x150, output 75x75
for i in range(resnet34_layers[0]):
# No projection shortcut is passed here: stride is 1 and the numbers of
# input and output channels are equal, so the identity shortcut is used
resnet_model.residual_block(cnn, 64, 1, version)
# ResNet-34 block group 2
# Input 75x75, output 38x38
for i in range(resnet34_layers[1]):
stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 128, stride, version, i == 0)
# ResNet-34 block group 3
# This block group is modified: the first layer uses stride=1 so that the
# feature map size does not change within this group of layers
# Input 38x38, output 38x38
for i in range(resnet34_layers[2]):
# The following line is intentionally commented out to differentiate from
# the original ResNet-34 model; the stride is kept at 1 instead.
# stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 256, 1, version, i == 0)
# ResNet-34 block group 4: removed final block group
# The following 3 lines are intentionally commented out to differentiate from
# the original ResNet-34 model
# for i in range(resnet34_layers[3]):
# stride = 2 if i == 0 else 1
# resnet_model.residual_block(cnn, 512, stride, version, i == 0)
def add_inference(self, cnn):
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,
'epsilon': ssd_constants.BATCH_NORM_EPSILON,
'scale': True}
with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):
self.add_backbone_model(cnn)
# --------------------------------------------------------------------------
# SSD additional layers
# --------------------------------------------------------------------------
def add_ssd_layer(cnn, depth, k_size, stride, mode):
return cnn.conv(depth, k_size, k_size, stride, stride,
mode=mode, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
# Activations for feature maps of different layers
self.activations = [cnn.top_layer]
# Conv7_1, Conv7_2
# Input 38x38, output 19x19
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv8_1, Conv8_2
# Input 19x19, output 10x10
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv9_1, Conv9_2
# Input 10x10, output 5x5
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))
# Conv10_1, Conv10_2
# Input 5x5, output 3x3
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
# Conv11_1, Conv11_2
# Input 3x3, output 1x1
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
self.loc = []
self.conf = []
for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):
l = cnn.conv(nd * 4, 3, 3, 1, 1, input_layer=ac,
num_channels_in=oc, activation=None, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
scale = l.get_shape()[-1]
# shape = [batch_size, nd * 4, scale, scale]
l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])
# shape = [batch_size, nd, 4, scale, scale]
l = tf.transpose(l, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, 4]
self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))
# shape = [batch_size, nd * scale * scale, 4]
c = cnn.conv(nd * self.label_num, 3, 3, 1, 1, input_layer=ac,
num_channels_in=oc, activation=None, use_batch_norm=False,
kernel_initializer=tf.contrib.layers.xavier_initializer())
# shape = [batch_size, nd * label_num, scale, scale]
c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])
# shape = [batch_size, nd, label_num, scale, scale]
c = tf.transpose(c, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, label_num]
self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))
# shape = [batch_size, nd * scale * scale, label_num]
# Shape of locs: [batch_size, NUM_SSD_BOXES, 4]
# Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]
locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)
# Pack location and confidence outputs into a single output layer
# Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]
logits = tf.concat([locs, confs], 2)
cnn.top_layer = logits
cnn.top_size = 4 + self.label_num
return cnn.top_layer
def get_learning_rate(self, global_step, batch_size):
rescaled_lr = self.get_scaled_base_learning_rate(batch_size)
# Defined in MLPerf reference model
boundaries = [160000, 200000]
boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]
decays = [1, 0.1, 0.01]
learning_rates = [rescaled_lr * d for d in decays]
lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)
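# Linear warmup over roughly the first 5 epochs; 118287 matches the number
# of COCO training images, so warmup_steps is about 5 epochs of steps at
# this batch size.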
warmup_steps = int(118287 / batch_size * 5)
warmup_lr = (
rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
def get_scaled_base_learning_rate(self, batch_size):
"""Calculates base learning rate for creating lr schedule.
In replicated mode, gradients are summed rather than averaged, which, with
the sgd and momentum optimizers, increases the effective learning rate by a
factor of num_gpus. Dividing the base lr by num_gpus negates the increase.
Args:
batch_size: Total batch-size.
Returns:
Base learning rate to use to create lr schedule.
"""
base_lr = self.learning_rate
if self.params.variable_update == 'replicated':
base_lr = self.learning_rate / self.params.num_gpus
scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)
return scaled_lr
def _collect_backbone_vars(self):
backbone_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)
var_list = {}
# Assume variables in the checkpoint follow the naming convention of a
# model checkpoint trained with the TF official models
# TODO(haoyuzhang): the following variable name parsing is hacky and easy
# to break if there is change in naming convention of either benchmarks or
# official models.
for v in backbone_vars:
# conv2d variable example (model <-- checkpoint):
# v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel
if 'conv2d' in v.name:
re_match = re.search(r'conv(\d+)/conv2d/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'conv2d', layer_id, param_name)
var_list[vname_in_ckpt] = v
# batchnorm variable example:
# v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma
elif 'batchnorm' in v.name:
re_match = re.search(r'batchnorm(\d+)/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'batch_normalization', layer_id, param_name)
var_list[vname_in_ckpt] = v
return var_list
def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):
"""Return variable names according to convention in TF official models."""
vname_in_ckpt = layer_name
if layer_id > 0:
vname_in_ckpt += '_' + str(layer_id)
vname_in_ckpt += '/' + param_name
return vname_in_ckpt
def loss_function(self, inputs, build_network_result):
logits = build_network_result.logits
# Unpack model output back to locations and confidence scores of predictions
# Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
# Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
# Shape of num_gt: [batch_size]
_, gt_loc, gt_label, num_gt = inputs
gt_label = tf.cast(gt_label, tf.int32)
box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
class_loss = self._classification_loss(pred_label, gt_label, num_gt)
tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
return class_loss + box_loss
def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):
"""Computes the localization loss.
Computes the localization loss using smooth l1 loss.
Args:
pred_loc: a flattened tensor that includes all predicted locations. The
shape is [batch_size, num_anchors, 4].
gt_loc: a tensor representing box regression targets in
[batch_size, num_anchors, 4].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to groundtruth
  targets, used as the loss normalizer. The shape is [batch_size].
Returns:
  box_loss: a float32 representing the total box regression loss.
"""
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(
gt_loc, pred_loc,
reduction=tf.losses.Reduction.NONE
), axis=2)
smooth_l1 = tf.multiply(smooth_l1, float_mask)
box_loss = tf.reduce_sum(smooth_l1, axis=1)
return tf.reduce_mean(box_loss / num_matched_boxes)
def _classification_loss(self, pred_label, gt_label, num_matched_boxes):
"""Computes the classification loss.
Computes the classification loss with hard negative mining.
Args:
pred_label: a flattened tensor that includes all predicted classes. The shape
is [batch_size, num_anchors, num_classes].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to groundtruth
  targets. This is used as the loss normalizer.
Returns:
  class_loss: a float32 representing the total classification loss.
"""
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
gt_label, pred_label, reduction=tf.losses.Reduction.NONE)
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
# Hard example mining
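# The double argsort below computes, for each anchor, the rank of its loss
# among the negative examples sorted in descending order; anchors whose rank
# falls below num_neg_boxes are kept as hard negatives.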
neg_masked_cross_entropy = cross_entropy * (1 - float_mask)
relative_position = tf.contrib.framework.argsort(
tf.contrib.framework.argsort(
neg_masked_cross_entropy, direction='DESCENDING'))
num_neg_boxes = tf.minimum(
tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,
ssd_constants.NUM_SSD_BOXES)
top_k_neg_mask = tf.cast(tf.less(
relative_position,
tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))
), tf.float32)
class_loss = tf.reduce_sum(
tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)
return tf.reduce_mean(class_loss / num_matched_boxes)
def add_backbone_saver(self):
# Create saver with mapping from variable names in checkpoint of backbone
# model to variables in SSD model
backbone_var_list = self._collect_backbone_vars()
self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
for saver in self.backbone_savers:
saver.restore(sess, backbone_model_path)
def get_input_data_types(self, subset):
if subset == 'validation':
return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]
return [self.data_type, tf.float32, tf.float32, tf.float32]
def get_input_shapes(self, subset):
"""Return encoded tensor shapes for train and eval data respectively."""
if subset == 'validation':
# Validation data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. source image IDs
# 5. raw image shapes
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],
[self.batch_size],
[self.batch_size, 3],
]
# Training data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. numbers of objects in images
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],
[self.batch_size]
]
def accuracy_function(self, inputs, logits):
"""Returns the ops to measure the mean precision of the model."""
try:
from cnn_quantization.tf_cnn_benchmarks import ssd_dataloader # pylint: disable=g-import-not-at-top
from tensorflow_models.object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top
from tensorflow_models.object_detection.core import box_coder # pylint: disable=g-import-not-at-top
from tensorflow_models.object_detection.core import box_list # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
                  'repo https://github.com/tensorflow/models and add '
                  'tensorflow/models and tensorflow/models/research to '
                  'the PYTHONPATH, and compile the protobufs; '
                  'to evaluate using the COCO '
                  'metric, download and install the Python COCO API '
                  'from https://github.com/cocodataset/cocoapi')
# Unpack model output back to locations and confidence scores of predictions
# pred_locs: relative locations (coordinates) of objects in all SSD boxes
#            shape: [batch_size, NUM_SSD_BOXES, 4]
# pred_labels: confidence scores of objects belonging to each category
#            shape: [batch_size, NUM_SSD_BOXES, label_num]
pred_locs, pred_labels = tf.split(logits, [4, self.label_num], 2)
ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=ssd_constants.BOX_CODER_SCALES)
anchors = box_list.BoxList(
tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))
pred_boxes = box_coder.batch_decode(
encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)
pred_scores = tf.nn.softmax(pred_labels, axis=2)
# TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.
_, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable
return {
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_BOXES): pred_boxes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_SCORES): pred_scores,
# TODO(haoyuzhang): maybe use these values for visualization.
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.SOURCE_ID): source_id,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.RAW_SHAPE): raw_shape
}
def postprocess(self, results):
"""Postprocess results returned from model."""
try:
from cnn_quantization.tf_cnn_benchmarks import coco_metric # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
                  'repo https://github.com/tensorflow/models and add '
                  'tensorflow/models and tensorflow/models/research to '
                  'the PYTHONPATH, and compile the protobufs; '
                  'to evaluate using the COCO '
                  'metric, download and install the Python COCO API '
                  'from https://github.com/cocodataset/cocoapi')
pred_boxes = results[ssd_constants.PRED_BOXES]
pred_scores = results[ssd_constants.PRED_SCORES]
# TODO(haoyuzhang): maybe use these values for visualization.
# gt_boxes = results['gt_boxes']
# gt_classes = results['gt_classes']
source_id = results[ssd_constants.SOURCE_ID]
raw_shape = results[ssd_constants.RAW_SHAPE]
# COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due
# to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting
# `num_eval_epochs` to 1 is not enough and will often miss some images. We
# expect the user to set `num_eval_epochs` to >1, which will leave some
# unused images from previous steps in `predictions`. Here we check if we
# are doing eval at a new global step.
if results['global_step'] > self.eval_global_step:
self.eval_global_step = results['global_step']
self.predictions.clear()
for i, sid in enumerate(source_id):
self.predictions[int(sid)] = {
ssd_constants.PRED_BOXES: pred_boxes[i],
ssd_constants.PRED_SCORES: pred_scores[i],
ssd_constants.SOURCE_ID: source_id[i],
ssd_constants.RAW_SHAPE: raw_shape[i]
}
# COCO metric calculates mAP only after a full epoch of evaluation. Return
# dummy results for top_N_accuracy to be compatible with benchmark_cnn.py.
if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:
log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(
ssd_constants.COCO_NUM_VAL_IMAGES))
annotation_file = os.path.join(self.params.data_dir,
ssd_constants.ANNOTATION_FILE)
# The size of predictions before decoding is about 15--30 GB, while the
# size after decoding is 100--200 MB. When using async eval mode, decoding
# takes 20--30 seconds of main-thread time but is necessary to avoid OOM
# during inter-process communication.
decoded_preds = coco_metric.decode_predictions(self.predictions.values())
self.predictions.clear()
if self.params.collect_eval_results_async:
def _eval_results_getter():
"""Iteratively get eval results from async eval process."""
while True:
step, eval_results = self.async_eval_results_queue.get()
self.eval_coco_ap = eval_results['COCO/AP']
mlperf.logger.log_eval_accuracy(
self.eval_coco_ap, step, self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
if self.reached_target():
# Reached target, clear all pending messages in predictions queue
# and insert poison pill to stop the async eval process.
while not self.async_eval_predictions_queue.empty():
self.async_eval_predictions_queue.get()
self.async_eval_predictions_queue.put('STOP')
break
if not self.async_eval_process:
# Limiting the number of messages in predictions queue to prevent OOM.
# Each message (predictions data) can potentially consume a lot of
# memory, and normally there should only be few messages in the queue.
# If often blocked on this, consider reducing eval frequency.
self.async_eval_predictions_queue = multiprocessing.Queue(2)
self.async_eval_results_queue = multiprocessing.Queue()
# The reason to use a Process as opposed to a Thread is mainly the
# computationally intensive eval runner: because of the GIL, Python threads
# do not truly run in parallel, so a runner thread would get significantly
# delayed (or would delay the main thread).
self.async_eval_process = multiprocessing.Process(
target=coco_metric.async_eval_runner,
args=(self.async_eval_predictions_queue,
self.async_eval_results_queue,
annotation_file))
self.async_eval_process.daemon = True
self.async_eval_process.start()
self.async_eval_results_getter_thread = threading.Thread(
target=_eval_results_getter, args=())
self.async_eval_results_getter_thread.daemon = True
self.async_eval_results_getter_thread.start()
self.async_eval_predictions_queue.put(
(self.eval_global_step, decoded_preds))
return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}
eval_results = coco_metric.compute_map(decoded_preds, annotation_file)
self.eval_coco_ap = eval_results['COCO/AP']
ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
for metric_key, metric_value in eval_results.items():
ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value
mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,
self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
return ret
log_fn('Got {:d} out of {:d} eval examples.'
' Waiting for the remaining to calculate mAP...'.format(
len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))
return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
def get_synthetic_inputs(self, input_name, nclass):
"""Generating synthetic data matching real data shape and type."""
inputs = tf.random_uniform(
self.get_input_shapes('train')[0], dtype=self.data_type)
inputs = tf.contrib.framework.local_variable(inputs, name=input_name)
boxes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)
classes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)
nboxes = tf.random_uniform(
[self.batch_size], minval=1, maxval=10, dtype=tf.float32)
return (inputs, boxes, classes, nboxes)
def reached_target(self):
return (self.params.stop_at_top_1_accuracy and
self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)
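# A minimal, illustrative sketch (not used by the model) of the async-eval
# queue protocol described in SSD300Model.__init__: a worker process pulls
# (step, predictions) tuples from a predictions queue and pushes
# (step, results) tuples into a results queue until it receives the 'STOP'
# poison pill. All names below are hypothetical; the sketch assumes the
# module-level `multiprocessing` import used elsewhere in this file.
def _demo_eval_worker(predictions_queue, results_queue):
  while True:
    message = predictions_queue.get()
    if message == 'STOP':  # poison pill terminates the worker
      break
    step, _predictions = message
    # A real worker would run COCO evaluation here.
    results_queue.put((step, {'COCO/AP': 0.0}))


def _demo_async_eval():
  predictions_queue = multiprocessing.Queue(2)
  results_queue = multiprocessing.Queue()
  worker = multiprocessing.Process(
      target=_demo_eval_worker, args=(predictions_queue, results_queue))
  worker.daemon = True
  worker.start()
  predictions_queue.put((1, []))
  print(results_queue.get())  # prints (1, {'COCO/AP': 0.0})
  predictions_queue.put('STOP')
  worker.join()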
|
d2.py
|
'''
* The MIT License (MIT)
* Copyright (c) Arturo Rodriguez All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import dash
import dash_table
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import pathlib
import requests
from flask import request
import time
import threading
import logging
import json
import QuantApp.Kernel as qak
# This app can be accessed through
# http://localhost/dash/9a7adf48-183f-4d44-8ab2-c0afd1610c71/dash.py?uid=30be80ea-835b-4524-a43a-21742aae77fa
# Plotly/Dash code as required by CoFlows
dash_init = True
__assetsFolder = '/app/mnt/Files/assets'
# Function to parse query-string arguments from the referring URL
def getArgs(_args):
args = request.headers['Referer']
if args is not None and args.find('?') >= 0:
args = args[args.find('?') + 1:]
args = args.split('&')
args = { element[0]: element[1] for element in map(lambda x: x.split('='), args) }
return args
return None
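# Example: for a Referer of 'http://host/app?uid=abc&x=1', getArgs(None)
# returns {'uid': 'abc', 'x': '1'}.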
def run(port, path):
global dash_init, __assetsFolder
if dash_init:
dash_init = False
def inner():
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# shut down any existing Dash server on this port
try:
requests.get(url = 'http://localhost:' + str(port) + path + 'shutdown')
time.sleep(5)
# print('done waiting...')
except:
pass
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width"}],
url_base_pathname = path,
assets_folder=__assetsFolder
)
app.url_base_pathname = path
# ALL DASH CODE MUST START HERE
# USER: Start Layout Define
app.layout = html.Div(
children=[
# Leave this section here. It is used to access the
# URL and store the session's permission and secret information
# Start -----
dcc.Location(id='url', refresh=False),
html.Div(
children= [
html.Div(id='link_id', children=[]),
html.Div(id='perm_id', style=dict(display='none')),
html.Div(id='secret', style=dict(display='none')),
],
),
# Finish -----
# Main layout section. Please edit this here to customise
html.Div(
id='main_id',
style=dict(display='none'),
className='row',
children=[
html.Div(
id='title_div',
children= [
html.H3(id='title', children='...'),
],
),
html.Div(
className='row',
style=dict(width='98.5%'),
children=[
html.Div(
style=dict(width='98.5%'),
className='row',
id='timeseries_chart_output_div',
children=[
html.Br(),
html.Div(
className='row',
children=[
html.Div(
className='twelve columns',
children=[
dcc.Graph(
id='timeseries_chart_output',
)
]
)
]
)
]
),
]
)
]
),
# Layout that is shown while the app is loading
html.Div(
id='loading_id',
style=dict(display='none'),
className='row',
children=[
html.Div(
children= [
html.H3(children='CoFlows App'),
html.H4(children='Loading....'),
]
)
]
),
# Layout that is shown when access to the group is denied
html.Div(
id='denied_id',
style=dict(display='none'),
className='row',
children=[
html.Div(
children= [
html.H3(children='CoFlows App'),
html.H4(children='Permission denied....'),
]
)
]
)
]
)
# Sets the related permission and user secret, and checks permissions from the URL
@app.callback(
[
Output('perm_id', 'children'),
Output('secret', 'children'),
Output('main_id', 'style'),
Output('denied_id', 'style'),
Output('loading_id', 'style'),
],
[
Input('url','search')
])
def set_start_values(args):
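# perm > -1: access granted (show main layout); perm == -1: access denied;
# otherwise keep showing the loading layout.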
args = getArgs(args)
if args is not None:
uid = args['uid']
cuser = qak.User.ContextUserBySecret(uid)
qgroup = qak.Group.FindGroup('9a7adf48-183f-4d44-8ab2-c0afd1610c71')
perm = qgroup.PermissionSecret(uid)
if perm > -1:
return [perm, uid, dict(), dict(display='none'), dict(display='none')]
elif perm == -1:
return [perm, uid, dict(display='none'), dict(), dict(display='none')]
else:
return [perm, uid, dict(display='none'), dict(display='none'), dict()]
return [-1, '', dict(display='none'), dict(), dict(display='none')]
# Sample callback that generates a timeseries
# Please edit this to customise your logic
@app.callback(
[
Output('title', 'children'),
Output('timeseries_chart_output', 'figure')
],
[
Input('secret', 'children'),
Input('perm_id', 'children')
]
)
def set_timeseries_chart(secret, perm_id):
charts = []
yaxis_type = 'Linear'
cuser = qak.User.ContextUserBySecret(secret)
x_axis = [0, 1, 2, 3, 4]
y_axis = [0, 1, 2, 3, 4]
charts.append(dict(
name='line 0',
x = x_axis,
y = y_axis,
mode='lines',
))
return [
cuser.FirstName + ' has permission ' + str(perm_id),
dict(
data = charts,
layout = dict(
yaxis={
'title': 'Y Axis',
'type': 'linear' if yaxis_type == 'Linear' else 'log',
},
margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
hovermode='closest',
height=800,
legend = dict(
orientation = 'h'
)
)
)
]
# ALL DASH CODE MUST END HERE
# necessary to shut down the server in case the code changes
@app.server.route(path + 'shutdown', methods=['GET'])
def shutdown():
try:
# Place to delete all variables from memory
pass
except Exception as e:
pass
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return app.run_server(port=port, debug=False, threaded=True)
server = threading.Thread(target = inner)
server.start()
|
server_11.py
|
import socket
import sys
import time
import os
import argparse
global proto
import multiprocessing as mp
from multiprocessing import *
import subprocess
import multiprocessing.reduction
import threading
loc = []
global dump
auth = []
with open("auth.f", 'r') as ass:
for lin in ass:
ips = lin.strip()
auth.append(ips)
#ipx = []
#with open("ban.f", 'r') as file:
#for lin2 in file:
# lin2 = lin2.strip()
# ipx.append(lin2)
dump = []
with open("registr.f", 'r') as r:
for lines in r:
word = lines.strip()
loc.append(word)
proto = socket.getprotobyname('tcp')
global today
tlog = []
class unknown_1(Exception):
pass
def bind(ip, port, socket):
    try:
        vr = socket.bind((ip, port))
    except Exception as failure:
        print("[-] Couldn't bind. Reason: %s" % (failure,))
        return False
    # socket.bind() returns None on success
    if vr is None:
        return "[*] Listening as %s:%d" % (ip, port)
    raise unknown_1("Unknown error occurred")
def send(socket, data, ip, port):
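    # Returns "code1" if the client's IP is not in the auth list; otherwise
    # sends the banner, prompts for an ID, and returns whatever the client
    # sends back.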
buffer = data
if ip not in auth:
        try:
            socket.sendto("Your IP is not authorised!\n".encode(), (ip, int(port)))
        except Exception:
            pass  # renv() is not defined in this scope; ignore send failures
        return "code1"
string = f'''
[Hello, {ip}]
This is Private Classified Files Server
Name of server: cvs231:fl
[Authorization 10 seconds]
What is your ID? !\n'''
try:
socket.sendto(string.encode(),(ip,int(port)))
except:
pass
try:
#socket.settimeout(10)
id = socket.recv(4096).decode().strip()
#print(id)
return id
#print(id)
except Exception as f:
print(f)
pass
def authenticate(data, log, func, ip, socket, port):
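    # Returns "code2" when the supplied ID matches an entry loaded from
    # registr.f; otherwise logs the failure, bans the IP, and returns a
    # failure string.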
#print(data)
#print(loc)
if data == "ID is none":
socket.sendto(f"Failure: {data}\n".encode() + b"Closing connection . . ." + f"Your IP ({ip}) is logged!".encode())
log.write("failure::%s" %(ip) + "Attempt: %s " %(data,))
log.close()
return "[-] Authentication failure from: %s" %(ip)
#socket.send(f"Failure: {data}".encode())
try:
socket.sendto(f"Failure: {data}".encode(),(ip,int(port)) + b"Closing connection . . ." + b"Your IP (%s) is logged! And was temporary banned!" %(ip,), (ip, int(port)))
pine = open("ban.f", 'a')
pine.write(ip + "\n")
pine.close()
socket.close()
except Exception as f:
print(f)
pass
log.close()
if data not in loc:
socket.sendto(f"Failure: {data}\n".encode() + b"Closing connection . . ." + f"Your IP ({ip}) is logged! And was temporary banned!".encode(), (ip, int(port)))
pine = open("ban.f", 'a')
pine.write(ip + "\n")
pine.close()
log.write("failure::%s" %(ip) + "Attempt: %s " %(data,))
log.close()
socket.close()
return "[-] Authentication failure from: %s " %(ip)
#socket.send(f"Failure: {data}\n".encode())
try:
socket.sendto(f"Failure: {data}\n".encode() + b"Closing connection . . ." + f"Your IP ({ip}) is logged! And was temporary banned!".encode(), (ip, int(port)))
pine = open("ban.f", 'a')
pine.write(ip + "\n")
pine.close()
socket.close()
except Exception as f:
print(f)
pass
#log.write("failure::%s " %(ip))
#log.close()
if data in loc:
try:
socket.sendto(f"Completed: {data}\n".encode(),(ip, int(port)))
return "code2"
except:
pass
def send_help(socket):
socket.send('''
help : shows this menu
--------------------------
banip : Ban an IP
unban : Unban an IP
--------------------------
shell : Remote Shell
shutd : Shutdowns the server
conec : Server connects to other server
logg : Turn on logging on commands
upld : Upload a File to server
dwnl : Download a file
chpw : Changes password
stdo : See today log
seal : See all logs
--------------------------
logout : Logout\n'''.encode())
def client(ip,console,port,socket,log):
def ss():
logs = 0
while True:
vr = socket.recv(18413).decode().strip()
if logs == 1:
log.write(vr + "\n")
log.close()
if vr == 'help':
send_help(socket)
elif vr == 'chpw':
passwd = socket.recv(134813).decode()
with open("registr.f", 'w') as file:
file.write(passwd)
file.close()
socket.send(f"[*] Password was changed to {passwd}".encode())
elif vr == "stdo":
#x = ','.join(str(x)for x in range(cringe))
prizmis = "\n".join(str(x) for x in tlog)
socket.send(prizmis.encode())
elif vr == 'upld':
lines = socket.recv(18413).decode().strip()
print(lines)
brb = []
#vr = socket.recv(1381).decode()
#print(vr)
name = socket.recv(148313).decode().strip()
print(name)
#lines =
rr = open(name, 'wb')
for crs in range(int(lines)):
conts = socket.recv(14813)
print(conts)
#print(conts)
rr.write(conts)
socket.send("[*] Written in file successfully!".encode())
rr.close()
ss()
elif vr == 'dwnl':
def getls(socket):
sub = subprocess.Popen("ls", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ok = sub.stdout.read().decode()
return ok
get_ls = getls(socket=socket)
socket.send(get_ls.encode())
fils = socket.recv(1831233).decode()
word = 0
with open(fils, 'r') as file:
for lines in file:
word += 1
wos = str(word) + "\x0A"
socket.send(wos.encode())
brb = []
with open(fils, 'rb') as file:
for lines in file:
wordex = lines.strip()
brb.append(wordex)
for lx in range(int(word)):
for cons in brb:
socket.send(cons)
#rwr = socket.recv(1831).decode()
#if 'fin' in rwr:
ss()
elif vr == 'banip':
x1 = open("ban.f", 'a')
#socket.send("Enter IP\n".encode())
ip = socket.recv(14813).decode().strip()
print(ip)
x1.write("\x0A" + ip + "\x0A")
x1.close()
socket.send(f"[*] {ip} IP is banned!".encode())
ss()
elif vr == 'seal':
rwr = []
with open("log.f") as f:
for lines in f:
word = lines.strip()
rwr.append(word)
payload = '\n'.join(str(x) for x in rwr)
socket.send(payload.encode())
elif vr == 'logg':
ans = socket.recv(148131).decode().strip()
if ans == 'logtrue':
socket.send("Logging ==> True".encode())
logs += 1
else:
socket.send("Logging ==> False".encode())
logs = 0
elif vr == 'unban':
    socket.send("Enter IP\n".encode())
    ip = socket.recv(148139).decode().strip()
    # Read the ban list, drop the unbanned IP, and rewrite the file
    with open("ban.f", 'r') as f:
        banned = [line.strip() for line in f if line.strip() and line.strip() != ip]
    with open("ban.f", 'w') as f:
        f.write("\n".join(banned) + "\n")
    socket.send(f"[*] {ip} IP is unbanned!".encode())
elif vr == 'shell':
socket.send("[*] Shell openned!".encode())
def mt():
while True:
def shelled(command, socket):
if 'cd' in command:
try:
os.chdir(command.replace('cd', '', 1).strip())  # strip the 'cd' prefix
mt()
except Exception as f:
print(f)
socket.send("[-] Unknown directory!\n".encode())
mt()
elif '?' in command:
socket.send(b'''
? : Shows this menu
cd : Go to a different directory
bck : Goes back to everything\n''')
mt()
rn = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ok = rn.stdout.read().decode().strip()
if ok == '':
pass
#return "None item"
if ok == None:
pass #return "None Item"
fl = rn.stderr.read().decode().strip()
if fl == "":
pass # return "None item"
if fl == None:
return "None Item"
return ok + fl
#sub = subprocess.Popen()
cmd = socket.recv(10113).decode().strip()
if 'bck'in cmd:
turn = 1
break
output = shelled(command=cmd, socket=socket)
socket.send(output.encode())
mt()
if turn == 1:
ss()
mt()
elif vr == 'logout':
socket.send("Logging out .. . . ".encode())
print("[*] Client logged off .. ")
break
elif vr == 'shutd':
socket.close()
exit()
elif vr == 'conec':
def connection_():
def connect(ip, port, msg):
def send_msg(socket,msg):
payload = msg + "\n"
socket.send(payload.encode())
try:
socket.settimeout(5)
except:
return "Timed out"
ans = socket.recv(14812).decode()
return ans
#prot = socket.getprotobyname("tcp")
import socket
arg = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ol = arg.connect((ip, port))
send = send_msg(socket=arg, msg=msg)
socket.send("Send IP\n".encode())
ipsec = socket.recv(18413).decode().strip()
socket.send("Specify a port".encode())
iport = socket.recv(14339).decode().strip()
socket.send("Specify a messasge".encode())
msg = socket.recv(143813).decode().strip()
ol = connect(ip=ipsec, port=int(iport), msg=msg)
if ol == None:
ol = "No Data Received!"
socket.send(ol.encode())
ss()
connection_()
else:
socket.send("[-] Unknown/forbidden command\n".encode())
ss()
ss()
def server(ip, port):
print("[*] Listening as %s:%d"%(ip,port))
if ':' not in ip:
test = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto)
# Binding the address and port
ol = bind(ip=ip, port=port, socket=test)
#if ol != False:
#print("[-] Something gone wrong .. . ")
#print("\x0A")
#print("[*] Rebinding . . ")
#server(ip=ip, port=port)
#print(ol)
test.listen(5)
def accept():
def renv():
cl, cl_addr = test.accept()
tlog.append("IP::%s connected on port %d" %(cl_addr[0],cl_addr[1]))
ipx = []
with open("ban.f", 'r') as file:
for lin2 in file:
lin2 = lin2.strip()
ipx.append(lin2)
#cl.send("[*] Checking your internet protocol, If banned / permament / temporary .. . \n".encode())
if cl_addr[0] in ipx:
print("[-] IP %s is forbidden to access this resource!! And has been logged right a way . . .For security reasons It was banned!" %(cl_addr[0]))
frosty = open("log.txt", 'a')
frosty.write("IP %s tried to access resource, but actions were limited and the client was banned!!! For security reasons!" %(cl_addr[0]) + "\n")
frosty.close()
renv()
while True:
note = open("log.f", 'a')
#client_handle = multiprocessing.reduction.reduce_handle(cl.fileno())
dump.append(cl_addr[0])
note.write("Connection from: %s " % (cl_addr[0]) + "\n")
print("[+] IP: %s conncted on port %d" %(cl_addr[0],cl_addr[1]))
vr = send(socket=cl, data=454, ip=cl_addr[0], port=cl_addr[1])
#print(vr)
#if 'code1' in vr:
#renv()
tlog.append("IP:%s"%(cl_addr[0]) + "data::%s"%(vr))
auth = authenticate(data=vr, log=note, func=accept, ip=cl_addr[0], socket=cl, port=cl_addr[1])
print(auth)
if 'code2' in auth:
print("[*] %s is authorised!" %(cl_addr[0]))
vrv = threading.Thread(target=client, args=(cl_addr[0], True, cl_addr[1], cl, note))
vrv.start()
renv()
renv()
def func():
#cl, cl_addr = test.accept()
#cl, cl_addr = test.accept()
usersa = 0
for users in range(5):
    try:
        # Start each acceptor thread before counting it, so that all five
        # threads actually run
        vrx = threading.Thread(target=accept)
        vrx.start()
        usersa += 1
        if usersa == 5:
            break
    except Exception as fl:
        print(fl)
func()
def config(ip):
address = ip[0]
port = int(ip[1])
users = 5
server(ip=address, port=port)
def __main__():
parsie = argparse.ArgumentParser()
parsie.add_argument("-l", "--localhost", help="Specify IPv4/IPv6 LAN Address | Default: LAN Address", default=socket.gethostbyname(socket.gethostname()) ,required=False)
parsie.add_argument("-u", "--upload", help="Upload a configuration | If mode is off", required=False)
parsie.add_argument("-p", '--port', help="Specify a port", default=0, required=False)
parsie.add_argument("-s", "--start", help="Modes: on/off", default="off", required=False)
args = parsie.parse_args()
lhost = args.localhost
upload = args.upload
port = int(args.port)
st = args.start
if st == 'off':
configuration = []
with open(upload, 'r') as file:
for lines in file:
word = lines.strip()
configuration.append(word)
config(ip=configuration)
__main__()
|
conftest.py
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014-2015 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import traceback
import pytest
import xcffib
import xcffib.testing
import xcffib.xproto
import libqtile.config
from libqtile import command_client, command_interface, ipc
from libqtile.backend.x11 import xcore
from libqtile.confreader import Config
from libqtile.core.session_manager import SessionManager
from libqtile.lazy import lazy
from libqtile.log_utils import init_log
from libqtile.resources import default_config
# the default sizes for the Xephyr windows
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
max_sleep = 5.0
sleep_time = 0.1
def pytest_addoption(parser):
parser.addoption(
"--debuglog", action="store_true", default=False, help="enable debug output"
)
class Retry:
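    # Decorator factory: keeps retrying the wrapped function, with the sleep
    # interval growing 1.5x each attempt, until it returns without raising one
    # of `ignore_exceptions` or until `tmax` seconds have elapsed.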
def __init__(self, fail_msg='retry failed!', ignore_exceptions=(),
dt=sleep_time, tmax=max_sleep, return_on_fail=False):
self.fail_msg = fail_msg
self.ignore_exceptions = ignore_exceptions
self.dt = dt
self.tmax = tmax
self.return_on_fail = return_on_fail
def __call__(self, fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
tmax = time.time() + self.tmax
dt = self.dt
ignore_exceptions = self.ignore_exceptions
while time.time() <= tmax:
try:
return fn(*args, **kwargs)
except ignore_exceptions:
pass
except AssertionError:
break
time.sleep(dt)
dt *= 1.5
if self.return_on_fail:
return False
else:
raise AssertionError(self.fail_msg)
return wrapper
@Retry(ignore_exceptions=(xcffib.ConnectionException,), return_on_fail=True)
def can_connect_x11(disp=':0', *, ok=None):
if ok is not None and not ok():
raise AssertionError()
conn = xcffib.connect(display=disp)
conn.disconnect()
return True
@Retry(ignore_exceptions=(ipc.IPCError,), return_on_fail=True)
def can_connect_qtile(socket_path, *, ok=None):
if ok is not None and not ok():
raise AssertionError()
ipc_client = ipc.Client(socket_path)
ipc_command = command_interface.IPCCommandInterface(ipc_client)
client = command_client.InteractiveCommandClient(ipc_command)
val = client.status()
if val == 'OK':
return True
return False
def whereis(program):
"""Search PATH for executable"""
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and \
not os.path.isdir(os.path.join(path, program)):
return os.path.join(path, program)
return None
class BareConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
libqtile.layout.stack.Stack(num_stacks=1),
libqtile.layout.stack.Stack(num_stacks=2)
]
floating_layout = libqtile.resources.default_config.floating_layout
keys = [
libqtile.config.Key(
["control"],
"k",
lazy.layout.up(),
),
libqtile.config.Key(
["control"],
"j",
lazy.layout.down(),
),
]
mouse = []
screens = [libqtile.config.Screen()]
follow_mouse_focus = False
class Xephyr:
"""Spawn Xephyr instance
Set up a Xephyr instance with the given parameters. The Xephyr instance
must be started, and then stopped.
"""
def __init__(self,
xinerama=True,
randr=False,
two_screens=True,
width=WIDTH,
height=HEIGHT,
xoffset=None):
self.xinerama = xinerama
self.randr = randr
self.two_screens = two_screens
self.width = width
self.height = height
if xoffset is None:
self.xoffset = width
else:
self.xoffset = xoffset
self.proc = None # Handle to Xephyr instance, subprocess.Popen object
self.display = None
self.display_file = None
def __enter__(self):
try:
self.start_xephyr()
except: # noqa: E722
self.stop_xephyr()
raise
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop_xephyr()
def start_xephyr(self):
"""Start Xephyr instance
Starts the Xephyr instance and sets the `self.display` to the display
which is used to setup the instance.
"""
# get a new display
display, self.display_file = xcffib.testing.find_display()
self.display = ":{}".format(display)
# build up arguments
args = [
"Xephyr",
"-name",
"qtile_test",
self.display,
"-ac",
"-screen",
"{}x{}".format(self.width, self.height),
]
if self.two_screens:
args.extend(["-origin", "%s,0" % self.xoffset, "-screen",
"%sx%s" % (SECOND_WIDTH, SECOND_HEIGHT)])
if self.xinerama:
args.extend(["+xinerama"])
if self.randr:
args.extend(["+extension", "RANDR"])
self.proc = subprocess.Popen(args)
if can_connect_x11(self.display, ok=lambda: self.proc.poll() is None):
return
# we weren't able to get a display up
if self.proc.poll() is None:
raise AssertionError("Unable to conncet to running Xephyr")
else:
raise AssertionError("Unable to start Xephyr, quit with return code {:d}".format(
self.proc.returncode
))
def stop_xephyr(self):
"""Stop the Xephyr instance"""
# Xephyr must be started first
if self.proc is None:
return
# Kill xephyr only if it is running
if self.proc.poll() is None:
# We should always be able to kill xephyr nicely
self.proc.terminate()
self.proc.wait()
self.proc = None
# clean up the lock file for the display we allocated
try:
self.display_file.close()
os.remove(xcffib.testing.lock_path(int(self.display[1:])))
except OSError:
pass
class TestManager:
"""Spawn a Qtile instance
Set up a qtile server instance on the given display, with the given socket
and log files. The qtile server must be started, and then stopped when it
is done. Windows can be spawned for the qtile instance to interact with
with various `.test_*` methods.
"""
def __init__(self, sockfile, display, debug_log):
self.sockfile = sockfile
self.display = display
self.log_level = logging.DEBUG if debug_log else logging.INFO
self.proc = None
self.c = None
self.testwindows = []
def start(self, config_class):
rpipe, wpipe = multiprocessing.Pipe()
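        # The child process sends its traceback through this pipe if qtile
        # fails to start, so the parent can surface the error below.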
def run_qtile():
try:
kore = xcore.XCore(display_name=self.display)
init_log(self.log_level, log_path=None, log_color=False)
q = SessionManager(kore, config_class(), fname=self.sockfile)
q.loop()
except Exception:
wpipe.send(traceback.format_exc())
self.proc = multiprocessing.Process(target=run_qtile)
self.proc.start()
# First, wait for socket to appear
if can_connect_qtile(self.sockfile, ok=lambda: not rpipe.poll()):
ipc_client = ipc.Client(self.sockfile)
ipc_command = command_interface.IPCCommandInterface(ipc_client)
self.c = command_client.InteractiveCommandClient(ipc_command)
return
if rpipe.poll(sleep_time):
error = rpipe.recv()
raise AssertionError("Error launching Qtile, traceback:\n%s" % error)
raise AssertionError("Error launching Qtile")
def create_manager(self, config_class):
"""Create a Qtile manager instance in this thread
This should only be used when it is known that the manager will throw
an error and the returned manager should not be started, otherwise this
will likely block the thread.
"""
init_log(self.log_level, log_path=None, log_color=False)
kore = xcore.XCore(display_name=self.display)
config = config_class()
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
return SessionManager(kore, config, fname=self.sockfile)
def terminate(self):
if self.proc is None:
print("Qtile is not alive", file=sys.stderr)
else:
# try to send SIGTERM and wait up to 10 sec to quit
self.proc.terminate()
self.proc.join(10)
if self.proc.is_alive():
print("Killing qtile forcefully", file=sys.stderr)
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.proc.pid, 9)
self.proc.join()
except OSError:
# The process may have died due to some other error
pass
if self.proc.exitcode:
print("Qtile exited with exitcode: %d" % self.proc.exitcode, file=sys.stderr)
self.proc = None
for proc in self.testwindows[:]:
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
def create_window(self, create, failed=None):
"""
Uses the function `create` to create a window.
Waits until qtile actually maps the window and then returns.
"""
client = self.c
start = len(client.windows())
create()
@Retry(ignore_exceptions=(RuntimeError,), fail_msg='Window never appeared...')
def success():
while failed is None or not failed():
if len(client.windows()) > start:
return True
raise RuntimeError("not here yet")
return success()
def _spawn_window(self, *args):
"""Starts a program which opens a window
Spawns a new subprocess for a command that opens a window, given by the
arguments to this method. Spawns the new process and checks that qtile
maps the new window.
"""
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
proc = None
def spawn():
nonlocal proc
proc = subprocess.Popen(args, env={"DISPLAY": self.display})
def failed():
if proc.poll() is not None:
return True
return False
self.create_window(spawn, failed=failed)
self.testwindows.append(proc)
return proc
def _spawn_script(self, script, *args):
python = sys.executable
d = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(d, "scripts", script)
return self._spawn_window(python, path, *args)
def kill_window(self, proc):
"""Kill a window and check that qtile unmaps it
Kills a window created by calling one of the `self.test*` methods,
ensuring that qtile removes it from the `windows` attribute.
"""
assert proc in self.testwindows, "Given process is not a spawned window"
start = len(self.c.windows())
proc.terminate()
proc.wait()
self.testwindows.remove(proc)
@Retry(ignore_exceptions=(ValueError,))
def success():
if len(self.c.windows()) < start:
return True
raise ValueError('window is still in client list!')
if not success():
raise AssertionError("Window could not be killed...")
def test_window(self, name):
return self._spawn_script("window.py", self.display, name)
def test_tkwindow(self, name, wm_type):
return self._spawn_script("tkwindow.py", name, wm_type)
def test_dialog(self, name="dialog"):
return self.test_tkwindow(name, "dialog")
def test_notification(self, name="notification"):
"""
Simulate a notification window. Note that, for testing purposes, this
process must be killed explicitly, unlike actual notifications which
are sent to a notification server and then expire after a timeout.
"""
# Don't use a real notification, e.g. notify-send or
# zenity --notification, since we want to keep the process on until
# explicitly killed
return self.test_tkwindow(name, "notification")
def test_xclock(self):
path = whereis("xclock")
return self._spawn_window(path)
def test_xeyes(self):
path = whereis("xeyes")
return self._spawn_window(path)
def test_xcalc(self):
path = whereis("xcalc")
return self._spawn_window(path)
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError(
"Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens \
had an attached group."
@pytest.fixture(scope="session")
def xvfb():
with xcffib.testing.XvfbTest():
display = os.environ["DISPLAY"]
if not can_connect_x11(display):
raise OSError("Xvfb did not come up")
yield
@pytest.fixture(scope="function")
def xephyr(request, xvfb):
kwargs = getattr(request, "param", {})
with Xephyr(**kwargs) as x:
yield x
@pytest.fixture(scope="function")
def manager(request, xephyr):
config = getattr(request, "param", BareConfig)
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
with tempfile.NamedTemporaryFile() as f:
sockfile = f.name
try:
manager = TestManager(sockfile, xephyr.display, request.config.getoption("--debuglog"))
manager.start(config)
yield manager
finally:
manager.terminate()
@pytest.fixture(scope="function")
def manager_nospawn(request, xephyr):
with tempfile.NamedTemporaryFile() as f:
sockfile = f.name
try:
manager = TestManager(sockfile, xephyr.display, request.config.getoption("--debuglog"))
yield manager
finally:
manager.terminate()
no_xinerama = pytest.mark.parametrize("xephyr", [{"xinerama": False}], indirect=True)
|
ws.py
|
#!/usr/bin/env python3
# coding=utf-8
# requires https://pypi.python.org/pypi/websocket-client/
from excepthook import uncaught_exception, install_thread_excepthook
import sys
sys.excepthook = uncaught_exception
install_thread_excepthook()
# !! Important! Be careful when adding code/imports before this point.
# Our except hook is installed here, so any errors before this point
# won't be caught if they're not in a try-except block.
# Hence, please avoid adding code before this comment; if it's necessary,
# test it thoroughly.
import os
import platform
# noinspection PyPackageRequirements
import websocket
from threading import Thread
import traceback
from bodyfetcher import BodyFetcher
import chatcommunicate
from datetime import datetime
from spamhandling import check_if_spam_json
from globalvars import GlobalVars
from datahandling import load_pickle, PICKLE_STORAGE, load_files, filter_auto_ignored_posts
from metasmoke import Metasmoke
from metasmoke_cache import MetasmokeCache
from deletionwatcher import DeletionWatcher
import json
import time
import requests
# noinspection PyPackageRequirements
from tld.utils import update_tld_names, TldIOError
from helpers import exit_mode, log, Helpers, log_exception
from flovis import Flovis
from tasks import Tasks
import chatcommands
MAX_SE_WEBSOCKET_RETRIES = 5
MIN_PYTHON_VERSION = (3, 5, 0)
RECOMMENDED_PYTHON_VERSION = (3, 6, 0)
THIS_PYTHON_VERSION = tuple(map(int, platform.python_version_tuple()))
if os.path.isfile("plugin.py"):
try:
import plugin
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
error_msg = "{}: {}\n{}".format(exc_type.__name__, exc_obj, traceback.format_tb(exc_tb))
log('warning', "Error while importing plugin:\n" + error_msg)
# Ignore and move on
levels = {
'debug': 0,
'info': 1,
'warning': 2,
'error': 3
}
if any('--loglevel' in x for x in sys.argv):
idx = ['--loglevel' in x for x in sys.argv].index(True)
arg = sys.argv[idx].split('=')
if len(arg) >= 2:
Helpers.min_log_level = levels[arg[-1]]
else:
Helpers.min_log_level = 0
else:
Helpers.min_log_level = 0
# Python 3.5.0 is the bare minimum needed to run SmokeDetector
if THIS_PYTHON_VERSION < MIN_PYTHON_VERSION:
msg = "SmokeDetector requires Python version {0}.{1}.{2} or newer to run.".format(*MIN_PYTHON_VERSION)
raise RuntimeError(msg)
# However, 3.5 is already deprecated so we need to prepare for this
# with a warning in the logs about it.
if THIS_PYTHON_VERSION < RECOMMENDED_PYTHON_VERSION:
msg = 'SmokeDetector may remove support for versions of Python before ' \
'{0}.{1}.{2} in the future, please consider upgrading your instances of ' \
'SmokeDetector to use Python {0}.{1}.{2} or newer.'.format(*RECOMMENDED_PYTHON_VERSION)
log('warning', msg)
if not GlobalVars.metasmoke_host:
log('info', "metasmoke host not found. Set it as metasmoke_host in the config file. "
"See https://github.com/Charcoal-SE/metasmoke.")
if not GlobalVars.metasmoke_key:
log('info', "No metasmoke key found, which is okay if both are running on the same host")
if not GlobalVars.metasmoke_ws_host:
log('info', "No metasmoke websocket host found, which is okay if you're anti-websocket")
# noinspection PyProtectedMember
def restart_automatically():
Metasmoke.send_statistics()
chatcommunicate.tell_rooms_with("debug", "{}: Executing automatic scheduled reboot.".format(GlobalVars.location))
time.sleep(6)
exit_mode("reboot")
def load_ms_cache_data():
"""
Load cached data from a pickle file on disk. Should really only need to be called once, on startup.
:returns: None
"""
if os.path.isfile(os.path.join(PICKLE_STORAGE, 'metasmokeCacheData.p')):
data = load_pickle('metasmokeCacheData.p')
MetasmokeCache._cache = data['cache']
MetasmokeCache._expiries = data['expiries']
# Restart after 6 hours, put this thing here so it doesn't get stuck at updating TLD or logging in indefinitely
Tasks.later(restart_automatically, after=21600)
try:
update_tld_names()
except TldIOError as ioerr:
# That we were unable to update the TLD names isn't actually a fatal error, so just log it and continue.
strerror = str(ioerr)
if "permission denied:" in strerror.lower():
if "/usr/local/lib/python" in strerror and "/dist-packages/" in strerror:
err_msg = "WARNING: Cannot update TLD names, due to `tld` being system-wide installed and not " \
"user-level installed. Skipping TLD names update. \n"
if "/home/" in strerror and ".local/lib/python" in strerror and "/site-packages/tld/" in strerror:
err_msg = "WARNING: Cannot read/write to user-space `tld` installation, check permissions on the " \
"path. Skipping TLD names update. \n"
else:
err_msg = strerror
elif "certificate verify failed" in strerror.lower():
# Ran into this error in testing on Windows, best to throw a warn if we get this...
err_msg = "WARNING: Cannot verify SSL connection for TLD names update; skipping TLD names update."
else:
err_msg = strerror
log_exception(type(ioerr), ioerr, err_msg, True, level="warning")
if "ChatExchangeU" in os.environ:
log('debug', "ChatExchange username loaded from environment")
username = os.environ["ChatExchangeU"]
elif GlobalVars.chatexchange_u:
log('debug', "ChatExchange username loaded from config")
username = GlobalVars.chatexchange_u
else:
log('error', "No ChatExchange username provided. Set it in config or provide it via environment variable")
exit_mode("shutdown")
if "ChatExchangeP" in os.environ:
log('debug', "ChatExchange password loaded from environment")
password = os.environ["ChatExchangeP"]
elif GlobalVars.chatexchange_p:
log('debug', "ChatExchange password loaded from config")
password = GlobalVars.chatexchange_p
else:
log('error', "No ChatExchange password provided. Set it in config or provide it via environment variable")
exit_mode("shutdown")
# We need an instance of bodyfetcher before load_files() is called
GlobalVars.bodyfetcher = BodyFetcher()
if GlobalVars.flovis_host:
GlobalVars.flovis = Flovis(GlobalVars.flovis_host)
load_files()
load_ms_cache_data()
filter_auto_ignored_posts()
GlobalVars.standby_mode = "standby" in sys.argv
GlobalVars.no_se_activity_scan = 'no_se_activity_scan' in sys.argv
chatcommunicate.init(username, password)
Tasks.periodic(Metasmoke.send_status_ping_and_verify_scanning_if_active, interval=60)
if GlobalVars.standby_mode:
chatcommunicate.tell_rooms_with("debug", GlobalVars.standby_message)
Metasmoke.send_status_ping()
while GlobalVars.standby_mode:
time.sleep(3)
chatcommunicate.join_command_rooms()
# noinspection PyProtectedMember
def check_socket_connections():
for client in chatcommunicate._clients.values():
if client.last_activity and (datetime.utcnow() - client.last_activity).total_seconds() >= 60:
exit_mode("socket_failure")
Tasks.periodic(check_socket_connections, interval=90)
log('info', '{} active'.format(GlobalVars.location))
log('info', 'MS host: {}'.format(GlobalVars.metasmoke_host))
def setup_websocket(attempt, max_attempts):
try:
ws = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
ws.send("155-questions-active")
return ws
except websocket.WebSocketException:
log('warning', 'WS failed to create websocket connection. Attempt {} of {}.'.format(attempt, max_attempts))
return None
def init_se_websocket_or_reboot(max_tries, tell_debug_room_on_error=False):
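    # Note the for/else below: the else clause runs only when the loop never
    # breaks, i.e. when every connection attempt failed.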
for tries in range(1, 1 + max_tries, 1):
ws = setup_websocket(tries, max_tries)
if ws:
break
else:
            # Wait, in the hope that transient network issues will resolve themselves.
time.sleep(10)
else:
        error_message = 'SE WebSocket: Max retries exceeded. Exiting; maybe a restart will fix things.'
log('error', error_message)
if tell_debug_room_on_error:
chatcommunicate.tell_rooms_with("debug", error_message)
time.sleep(6) # Make it more likely the message is actually sent to the rooms prior to rebooting.
exit_mode("reboot")
return ws
ws = init_se_websocket_or_reboot(MAX_SE_WEBSOCKET_RETRIES)
GlobalVars.deletion_watcher = DeletionWatcher()
if "first_start" in sys.argv:
chatcommunicate.tell_rooms_with('debug', GlobalVars.s if GlobalVars.on_branch else GlobalVars.s_reverted)
Tasks.periodic(Metasmoke.send_statistics, interval=600)
metasmoke_ws_t = Thread(name="metasmoke websocket", target=Metasmoke.init_websocket)
metasmoke_ws_t.start()
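# A hedged sketch of the frame shape the loop below relies on, inferred purely
# from the fields it accesses (not an official schema):
#   {"action": "155-questions-active",
#    "data": "{\"id\": ..., \"siteBaseHostAddress\": ...}"}
# Note that "data" is itself a JSON-encoded string, hence the double json.loads below.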
while not GlobalVars.no_se_activity_scan:
try:
a = ws.recv()
if a is not None and a != "":
action = json.loads(a)["action"]
if action == "hb":
ws.send("hb")
if action == "155-questions-active":
if GlobalVars.flovis is not None:
data = json.loads(json.loads(a)['data'])
GlobalVars.flovis.stage('received', data['siteBaseHostAddress'], data['id'], json.loads(a))
is_spam, reason, why = check_if_spam_json(a)
t = Thread(name="bodyfetcher post enqueing",
target=GlobalVars.bodyfetcher.add_to_queue,
args=(a, True if is_spam else None))
t.start()
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
now = datetime.utcnow()
delta = now - GlobalVars.startup_utc_date
seconds = delta.total_seconds()
tr = traceback.format_exc()
exception_only = ''.join(traceback.format_exception_only(type(e), e))\
.strip()
n = os.linesep
logged_msg = str(now) + " UTC" + n + exception_only + n + tr + n + n
log('error', logged_msg)
log_exception(exc_type, exc_obj, exc_tb)
if seconds < 180 and exc_type not in {websocket.WebSocketConnectionClosedException, requests.ConnectionError}:
# noinspection PyProtectedMember
exit_mode("early_exception")
ws = init_se_websocket_or_reboot(MAX_SE_WEBSOCKET_RETRIES, tell_debug_room_on_error=True)
chatcommunicate.tell_rooms_with("debug", "{}: SE WebSocket: recovered from `{}`"
.format(GlobalVars.location, exception_only))
while GlobalVars.no_se_activity_scan:
    # Sleep for longer than the automatic restart interval
time.sleep(30000)
|
configure_and_test_integration_instances.py
|
from __future__ import print_function
import argparse
import ast
import json
import os
import subprocess
import sys
import uuid
import zipfile
from datetime import datetime
from distutils.version import LooseVersion
from enum import IntEnum
from pprint import pformat
from threading import Thread
from time import sleep
from typing import List, Tuple, Union
from urllib.parse import quote_plus
import demisto_client
from demisto_sdk.commands.common.constants import FileType
from demisto_sdk.commands.common.tools import run_threads_list, run_command, get_yaml, \
str2bool, format_version, find_type
from demisto_sdk.commands.test_content.constants import SSH_USER
from demisto_sdk.commands.test_content.mock_server import MITMProxy, run_with_mock, RESULT
from demisto_sdk.commands.test_content.tools import update_server_configuration, is_redhat_instance
from demisto_sdk.commands.test_content.TestContentClasses import BuildContext
from demisto_sdk.commands.validate.validate_manager import ValidateManager
from ruamel import yaml
from Tests.Marketplace.search_and_install_packs import search_and_install_packs_and_their_dependencies, \
upload_zipped_packs, install_all_content_packs_for_nightly
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
from Tests.test_content import get_server_numeric_version
from Tests.test_integration import __get_integration_config, __test_integration_instance, disable_all_integrations
from Tests.tools import run_with_proxy_configured
from Tests.update_content_data import update_content
MARKET_PLACE_MACHINES = ('master',)
SKIPPED_PACKS = ['NonSupported', 'ApiModules']
NO_PROXY = ','.join([
'oproxy.demisto.ninja',
'oproxy-dev.demisto.ninja',
])
NO_PROXY_CONFIG = {'python.pass.extra.keys': f'--env##no_proxy={NO_PROXY}'} # noqa: E501
DOCKER_HARDENING_CONFIGURATION = {
'docker.cpu.limit': '1.0',
'docker.run.internal.asuser': 'true',
'limit.docker.cpu': 'true',
'python.pass.extra.keys': f'--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192##--env##no_proxy={NO_PROXY}', # noqa: E501
'powershell.pass.extra.keys': f'--env##no_proxy={NO_PROXY}',
}
DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN = {
'docker.run.internal.asuser': 'true'
}
MARKET_PLACE_CONFIGURATION = {
'content.pack.verify': 'false',
'marketplace.initial.sync.delay': '0',
'content.pack.ignore.missing.warnings.contentpack': 'true'
}
AVOID_DOCKER_IMAGE_VALIDATION = {
'content.validate.docker.images': 'false'
}
ID_SET_PATH = './artifacts/id_set.json'
class Running(IntEnum):
CI_RUN = 0
WITH_OTHER_SERVER = 1
WITH_LOCAL_SERVER = 2
class Server:
def __init__(self, internal_ip, port, user_name, password):
self.__ssh_client = None
self.__client = None
self.internal_ip = internal_ip
self.ssh_tunnel_port = port
self.user_name = user_name
self.password = password
def __str__(self):
return self.internal_ip
@property
def client(self):
if self.__client is None:
self.__client = self.reconnect_client()
return self.__client
def reconnect_client(self):
self.__client = demisto_client.configure(f'https://localhost:{self.ssh_tunnel_port}',
verify_ssl=False,
username=self.user_name,
password=self.password)
return self.__client
def add_server_configuration(self, config_dict, error_msg, restart=False):
update_server_configuration(self.client, config_dict, error_msg)
if restart:
self.exec_command('sudo systemctl restart demisto')
def exec_command(self, command):
subprocess.check_output(f'ssh {SSH_USER}@{self.internal_ip} {command}'.split(),
stderr=subprocess.STDOUT)
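# Minimal usage sketch of the Server class (hypothetical values; assumes an SSH
# tunnel already forwards local port 4445 to the server's API port, and that
# 'log.level' is just an example configuration key):
#   server = Server('10.0.0.1', 4445, 'admin', 'password')
#   server.add_server_configuration({'log.level': 'debug'},
#                                   error_msg='failed to set log level', restart=True)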
def get_id_set(id_set_path) -> Union[dict, None]:
"""
Used to collect the ID set so it can be passed to the Build class on init.
:return: ID set as a dict if it exists.
"""
if os.path.isfile(id_set_path):
return get_json_file(id_set_path)
return None
class Build:
# START CHANGE ON LOCAL RUN #
content_path = f'{os.getenv("HOME")}/project' if os.getenv('CIRCLECI') else os.getenv('CI_PROJECT_DIR')
test_pack_target = f'{os.getenv("HOME")}/project/Tests' if os.getenv('CIRCLECI') else f'{os.getenv("CI_PROJECT_DIR")}/Tests' # noqa
key_file_path = 'Use in case of running with non local server'
run_environment = Running.CI_RUN
env_results_path = f'{os.getenv("ARTIFACTS_FOLDER")}/env_results.json'
DEFAULT_SERVER_VERSION = '99.99.98'
# END CHANGE ON LOCAL RUN #
def __init__(self, options):
self._proxy = None
self.git_sha1 = options.git_sha1
self.branch_name = options.branch
self.ci_build_number = options.build_number
self.is_nightly = options.is_nightly
self.ami_env = options.ami_env
self.server_to_port_mapping, self.server_numeric_version = self.get_servers(options.ami_env)
self.secret_conf = get_json_file(options.secret)
self.username = options.user if options.user else self.secret_conf.get('username')
self.password = options.password if options.password else self.secret_conf.get('userPassword')
self.servers = [Server(internal_ip,
port,
self.username,
self.password) for internal_ip, port in self.server_to_port_mapping.items()]
self.is_private = options.is_private
conf = get_json_file(options.conf)
self.tests = conf['tests']
self.skipped_integrations_conf = conf['skipped_integrations']
self.unmockable_integrations = conf['unmockable_integrations']
id_set_path = options.id_set_path if options.id_set_path else ID_SET_PATH
self.id_set = get_id_set(id_set_path)
self.test_pack_path = options.test_pack_path if options.test_pack_path else None
self.tests_to_run = self.fetch_tests_list(options.tests_to_run)
self.content_root = options.content_root
self.pack_ids_to_install = self.fetch_pack_ids_to_install(options.pack_ids_to_install)
self.service_account = options.service_account
@property
def proxy(self) -> MITMProxy:
"""
        A property method that should create and return a single proxy instance throughout the build
Returns:
The single proxy instance that should be used in this build.
"""
if not self._proxy:
self._proxy = MITMProxy(self.servers[0].internal_ip,
logging_module=logging,
build_number=self.ci_build_number,
branch_name=self.branch_name)
return self._proxy
@staticmethod
def fetch_tests_list(tests_to_run_path: str):
"""
Fetches the test list from the filter.
:param tests_to_run_path: Path to location of test filter.
:return: List of tests if there are any, otherwise empty list.
"""
tests_to_run = []
with open(tests_to_run_path, "r") as filter_file:
tests_from_file = filter_file.readlines()
for test_from_file in tests_from_file:
test_clean = test_from_file.rstrip()
tests_to_run.append(test_clean)
return tests_to_run
@staticmethod
def fetch_pack_ids_to_install(packs_to_install_path: str):
"""
Fetches the test list from the filter.
:param packs_to_install_path: Path to location of pack IDs to install file.
:return: List of Pack IDs if there are any, otherwise empty list.
"""
tests_to_run = []
with open(packs_to_install_path, "r") as filter_file:
tests_from_file = filter_file.readlines()
for test_from_file in tests_from_file:
test_clean = test_from_file.rstrip()
tests_to_run.append(test_clean)
return tests_to_run
@staticmethod
def get_servers(ami_env):
env_conf = get_env_conf()
server_to_port_mapping = map_server_to_port(env_conf, ami_env)
if Build.run_environment == Running.CI_RUN:
server_numeric_version = get_server_numeric_version(ami_env)
else:
server_numeric_version = Build.DEFAULT_SERVER_VERSION
return server_to_port_mapping, server_numeric_version
def options_handler():
parser = argparse.ArgumentParser(description='Utility for instantiating and testing integration instances')
parser.add_argument('-u', '--user', help='The username for the login', required=True)
parser.add_argument('-p', '--password', help='The password for the login', required=True)
parser.add_argument('--ami_env', help='The AMI environment for the current run. Options are '
'"Server Master", "Server 6.0". '
'The server url is determined by the AMI environment.')
parser.add_argument('-g', '--git_sha1', help='commit sha1 to compare changes with')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-s', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--is-nightly', type=str2bool, help='Is nightly build')
parser.add_argument('-pr', '--is_private', type=str2bool, help='Is private build')
parser.add_argument('--branch', help='GitHub branch name', required=True)
parser.add_argument('--build-number', help='CI job number where the instances were created', required=True)
parser.add_argument('--test_pack_path', help='Path to where the test pack will be saved.',
default='/home/runner/work/content-private/content-private/content/artifacts/packs')
parser.add_argument('--content_root', help='Path to the content root.',
default='/home/runner/work/content-private/content-private/content')
parser.add_argument('--id_set_path', help='Path to the ID set.')
parser.add_argument('-l', '--tests_to_run', help='Path to the Test Filter.',
default='./artifacts/filter_file.txt')
parser.add_argument('-pl', '--pack_ids_to_install', help='Path to the packs to install file.',
default='./artifacts/content_packs_to_install.txt')
# disable-secrets-detection-start
parser.add_argument('-sa', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
# disable-secrets-detection-end
options = parser.parse_args()
return options
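# Illustrative invocation (all values are placeholders) using the flags defined
# in options_handler() above:
#   python configure_and_test_integration_instances.py \
#       -u admin -p secret -c conf.json -s secret_conf.json \
#       --branch my-branch --build-number 1234 --ami_env "Server Master"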
def check_test_version_compatible_with_server(test, server_version):
"""
    Checks if a given test is compatible with the given server version.
    Arguments:
        test: (dict)
            Test playbook object from content conf.json. May contain the following fields: "playbookID",
            "integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion".
        server_version: (str)
            The server numerical version.
Returns:
(bool) True if test is compatible with server version or False otherwise.
"""
test_from_version = format_version(test.get('fromversion', '0.0.0'))
test_to_version = format_version(test.get('toversion', '99.99.99'))
server_version = format_version(server_version)
if not LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version):
playbook_id = test.get('playbookID')
logging.debug(
f'Test Playbook: {playbook_id} was ignored in the content installation test due to version mismatch '
f'(test versions: {test_from_version}-{test_to_version}, server version: {server_version})')
return False
return True
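# Worked example (hypothetical test entry): a test restricted to 5.5.0-6.0.0 is
# compatible with server 5.9.9 but not with 6.1.0:
#   check_test_version_compatible_with_server(
#       {'playbookID': 'demo', 'fromversion': '5.5.0', 'toversion': '6.0.0'}, '6.1.0')  # -> False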
def filter_tests_with_incompatible_version(tests, server_version):
"""
Filter all tests with incompatible version to the given server.
Arguments:
tests: (list)
List of test objects.
        server_version: (str)
            The server numerical version.
    Returns:
        (list): List of filtered tests (compatible version)
"""
filtered_tests = [test for test in tests if
check_test_version_compatible_with_server(test, server_version)]
return filtered_tests
def configure_integration_instance(integration, client, placeholders_map):
"""
Configure an instance for an integration
Arguments:
integration: (dict)
Integration object whose params key-values are set
client: (demisto_client)
The client to connect to
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
Returns:
(dict): Configured integration instance
"""
integration_name = integration.get('name')
logging.info(f'Configuring instance for integration "{integration_name}"')
integration_instance_name = integration.get('instance_name', '')
integration_params = change_placeholders_to_values(placeholders_map, integration.get('params'))
is_byoi = integration.get('byoi', True)
validate_test = integration.get('validate_test', True)
integration_configuration = __get_integration_config(client, integration_name)
if not integration_configuration:
return None
# In the integration configuration in content-test-conf conf.json, the test_validate flag was set to false
if not validate_test:
logging.debug(f'Skipping configuration for integration: {integration_name} (it has test_validate set to false)')
return None
module_instance = set_integration_instance_parameters(integration_configuration, integration_params,
integration_instance_name, is_byoi, client)
return module_instance
def filepath_to_integration_name(integration_file_path):
"""Load an integration file and return the integration name.
Args:
integration_file_path (str): The path to an integration yml file.
Returns:
(str): The name of the integration.
"""
integration_yaml = get_yaml(integration_file_path)
integration_name = integration_yaml.get('name')
return integration_name
def get_integration_names_from_files(integration_files_list):
integration_names_list = [filepath_to_integration_name(path) for path in integration_files_list]
return [name for name in integration_names_list if name] # remove empty values
def get_new_and_modified_integration_files(branch_name):
"""Return 2 lists - list of new integrations and list of modified integrations since the first commit of the branch.
Args:
branch_name: The branch name against which we will run the 'git diff' command.
Returns:
(tuple): Returns a tuple of two lists, the file paths of the new integrations and modified integrations.
"""
# get changed yaml files (filter only added and modified files)
file_validator = ValidateManager(skip_dependencies=True)
file_validator.branch_name = branch_name
modified_files, added_files, _, _ = file_validator.get_changed_files_from_git()
new_integration_files = [
file_path for file_path in added_files if
find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
modified_integration_files = [
file_path for file_path in modified_files if
isinstance(file_path, str) and find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
return new_integration_files, modified_integration_files
def is_content_update_in_progress(client):
"""Make request to check if content is updating.
Args:
client (demisto_client): The configured client to use.
Returns:
(str): Returns the request response data which is 'true' if updating and 'false' if not.
"""
host = client.api_client.configuration.host
logging.debug(f'Making "Get" request to server - "{host}" to check if content is installing.')
# make request to check if content is updating
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/content/updating',
method='GET', accept='application/json')
if status_code >= 300 or status_code < 200:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
logging.error(f"Failed to check if content is installing - with status code {status_code}\n{message}")
return 'request unsuccessful'
return response_data
def get_content_version_details(client, ami_name):
"""Make request for details about the content installed on the demisto instance.
Args:
client (demisto_client): The configured client to use.
ami_name (string): the role name of the machine
Returns:
(tuple): The release version and asset ID of the content installed on the demisto instance.
"""
host = client.api_client.configuration.host
logging.info(f'Making "POST" request to server - "{host}" to check installed content.')
# make request to installed content details
uri = '/content/installedlegacy' if ami_name in MARKET_PLACE_MACHINES else '/content/installed'
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path=uri,
method='POST')
try:
result_object = ast.literal_eval(response_data)
logging.debug(f'Response was {response_data}')
except ValueError:
logging.exception('failed to parse response from demisto.')
return '', 0
if status_code >= 300 or status_code < 200:
message = result_object.get('message', '')
        logging.error(f'Failed to get installed content details - with status code {status_code}\n{message}')
return result_object.get('release', ''), result_object.get('assetId', 0)
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, str(value))
return json.loads(item_as_string)
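# Worked example (hypothetical values): placeholder keys are literal tokens that
# get substituted everywhere in the JSON-serialized config:
#   change_placeholders_to_values({'%%SERVER_HOST%%': '10.0.0.1'},
#                                 {'url': 'https://%%SERVER_HOST%%:443'})
#   # -> {'url': 'https://10.0.0.1:443'}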
def set_integration_params(build,
integrations,
secret_params,
instance_names,
placeholders_map,
logging_module=logging):
"""
For each integration object, fill in the parameter values needed to configure an instance from
the secret_params taken from our secret configuration file. Because there may be a number of
configurations for a single integration (if there are values provided in our secret conf for
    multiple different instances of the same integration), this selects the parameter values for the
    configuration of the instance whose instance name is in 'instance_names' (it will take the last one listed
in 'secret_params'). Note that this function does not explicitly return the modified 'integrations'
object but rather it modifies the 'integrations' object since it is passed by reference and not by
value, so the 'integrations' object that was passed to this function will have been changed once
this function has completed execution and gone out of scope.
Arguments:
build: Build object
integrations: (list of dicts)
List of integration objects whose 'params' attribute will be populated in this function.
secret_params: (list of dicts)
List of secret configuration values for all of our integrations (as well as specific
instances of said integrations).
instance_names: (list)
The names of particular instances of an integration to use the secret_params of as the
configuration values.
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
logging_module (Union[ParallelLoggingManager,logging]): The logging module to use
Returns:
(bool): True if integrations params were filled with secret configuration values, otherwise false
"""
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
# if there are more than one integration params, it means that there are configuration
# values in our secret conf for multiple instances of the given integration and now we
# need to match the configuration values to the proper instance as specified in the
# 'instance_names' list argument
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
failed_match_instance_msg = 'There are {} instances of {}, please select one of them by using' \
' the instance_name argument in conf.json. The options are:\n{}'
logging_module.error(failed_match_instance_msg.format(len(integration_params),
integration['name'],
'\n'.join(optional_instance_names)))
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
if integration['name'] not in build.unmockable_integrations:
integration['params'].update({'proxy': True})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=True')
else:
integration['params'].update({'proxy': False})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=False')
return True
def set_module_params(param_conf, integration_params):
"""Configure a parameter object for use in a module instance.
Each integration parameter is actually an object with many fields that together describe it. E.g. a given
parameter will have all of the following fields - "name", "display", "value", "hasvalue", "defaultValue",
etc. This function fills the "value" field for a parameter configuration object and returns it for use in
a module instance.
Args:
param_conf (dict): The parameter configuration object.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
Returns:
(dict): The configured parameter object
"""
if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
# param defined in conf
key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
if key == 'credentials':
credentials = integration_params[key]
param_value = {
'credential': '',
'identifier': credentials['identifier'],
'password': credentials['password'],
'passwordChanged': False
}
else:
param_value = integration_params[key]
param_conf['value'] = param_value
param_conf['hasvalue'] = True
elif param_conf['defaultValue']:
# if the parameter doesn't have a value provided in the integration's configuration values
# but does have a default value then assign it to the parameter for the module instance
param_conf['value'] = param_conf['defaultValue']
return param_conf
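# Worked example (hypothetical parameter): given
#   param_conf = {'name': 'apikey', 'display': 'API Key', 'defaultValue': ''}
#   integration_params = {'API Key': 'abc123'}
# the lookup matches on the 'display' field, so the returned param_conf has
# 'value' == 'abc123' and 'hasvalue' == True.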
def __set_server_keys(client, integration_params, integration_name):
"""Adds server configuration keys using the demisto_client.
Args:
client (demisto_client): The configured client to use.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
integration_name (str): The name of the integration which the server configurations keys are related to.
"""
if 'server_keys' not in integration_params:
return
logging.info(f'Setting server keys for integration: {integration_name}')
data: dict = {
'data': {},
'version': -1
}
for key, value in integration_params.get('server_keys').items():
data['data'][key] = value
update_server_configuration(
client=client,
server_configuration=data,
error_msg='Failed to set server keys'
)
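# Illustrative secret-conf snippet (hypothetical key/value) that would trigger
# the helper above:
#   integration_params = {'server_keys': {'content.pack.verify': 'false'}}
# which is sent to the server as {'data': {'content.pack.verify': 'false'}, 'version': -1}.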
def set_integration_instance_parameters(integration_configuration,
integration_params,
integration_instance_name,
is_byoi,
client):
"""Set integration module values for integration instance creation
The integration_configuration and integration_params should match, in that
they are for the same integration
Arguments:
integration_configuration: (dict)
dictionary of the integration configuration parameters/keys that need
filling to instantiate an instance of a given integration
integration_params: (dict)
values for a given integration taken from the configuration file in
which the secret values are stored to configure instances of various
integrations
integration_instance_name: (str)
The name of the integration instance being configured if there is one
provided in the conf.json
is_byoi: (bool)
If the integration is byoi or not
client: (demisto_client)
The client to connect to
Returns:
(dict): The configured module instance to send to the Demisto server for
instantiation.
"""
module_configuration = integration_configuration.get('configuration', {})
if not module_configuration:
module_configuration = []
if 'integrationInstanceName' in integration_params:
instance_name = integration_params['integrationInstanceName']
else:
instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'), str(uuid.uuid4()))
# define module instance
module_instance = {
'brand': integration_configuration['name'],
'category': integration_configuration['category'],
'configuration': integration_configuration,
'data': [],
'enabled': "true",
'engine': '',
'id': '',
'isIntegrationScript': is_byoi,
'name': instance_name,
'passwordProtected': False,
'version': 0
}
# set server keys
__set_server_keys(client, integration_params, integration_configuration['name'])
# set module params
for param_conf in module_configuration:
configured_param = set_module_params(param_conf, integration_params)
module_instance['data'].append(configured_param)
return module_instance
def group_integrations(integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names):
"""
Filter integrations into their respective lists - new, modified or unchanged. if it's on the skip list, then
skip if random tests were chosen then we may be configuring integrations that are neither new or modified.
Args:
integrations (list): The integrations to categorize.
skipped_integrations_conf (dict): Integrations that are on the skip list.
new_integrations_names (list): The names of new integrations.
modified_integrations_names (list): The names of modified integrations.
Returns:
(tuple): Lists of integrations objects as well as an Integration-to-Status dictionary useful for logs.
"""
new_integrations = []
modified_integrations = []
unchanged_integrations = []
integration_to_status = {}
for integration in integrations:
integration_name = integration.get('name', '')
if integration_name in skipped_integrations_conf.keys():
continue
        if integration_name in new_integrations_names:
            new_integrations.append(integration)
            integration_to_status[integration_name] = 'New Integration'
elif integration_name in modified_integrations_names:
modified_integrations.append(integration)
integration_to_status[integration_name] = 'Modified Integration'
else:
unchanged_integrations.append(integration)
integration_to_status[integration_name] = 'Unchanged Integration'
return new_integrations, modified_integrations, unchanged_integrations, integration_to_status
def get_integrations_for_test(test, skipped_integrations_conf):
"""Return a list of integration objects that are necessary for a test (excluding integrations on the skip list).
Args:
test (dict): Test dictionary from the conf.json file containing the playbookID, integrations and
instance names.
skipped_integrations_conf (dict): Skipped integrations dictionary with integration names as keys and
the skip reason as values.
Returns:
(list): List of integration objects to configure.
"""
integrations_conf = test.get('integrations', [])
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf]
integrations = [
{'name': integration, 'params': {}} for
integration in integrations_conf if integration not in skipped_integrations_conf
]
return integrations
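# Example (hypothetical conf.json test entry):
#   get_integrations_for_test({'playbookID': 'demo', 'integrations': 'SplunkPy'}, {})
#   # -> [{'name': 'SplunkPy', 'params': {}}]
# A bare string is wrapped in a list, and skipped integrations are dropped.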
def update_content_on_demisto_instance(client, server, ami_name):
"""Try to update the content
Args:
client (demisto_client): The configured client to use.
server (str): The server url to pass to Tests/update_content_data.py
"""
content_zip_path = 'artifacts/all_content.zip'
update_content(content_zip_path, server=server, client=client)
# Check if content update has finished installing
sleep_interval = 20
updating_content = is_content_update_in_progress(client)
while updating_content.lower() == 'true':
sleep(sleep_interval)
updating_content = is_content_update_in_progress(client)
if updating_content.lower() == 'request unsuccessful':
# since the request to check if content update installation finished didn't work, can't use that mechanism
# to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
            logging.debug('Request to check the content installation status was unsuccessful, sleeping for 30 seconds and retrying')
sleep(30)
else:
# check that the content installation updated
# verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
release, asset_id = get_content_version_details(client, ami_name)
logging.info(f'Content Release Version: {release}')
with open('./artifacts/content-descriptor.json', 'r') as cd_file:
cd_json = json.loads(cd_file.read())
cd_release = cd_json.get('release')
cd_asset_id = cd_json.get('assetId')
if release == cd_release and asset_id == cd_asset_id:
logging.success(f'Content Update Successfully Installed on server {server}.')
else:
logging.error(
f'Content Update to version: {release} was Unsuccessful:\nAttempted to install content with release '
f'"{cd_release}" and assetId "{cd_asset_id}" but release "{release}" and assetId "{asset_id}" '
f'were retrieved from the instance post installation.')
if ami_name not in MARKET_PLACE_MACHINES:
sys.exit(1)
def report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
new_integrations_names, build=None):
"""Prints errors and/or warnings if there are any and returns whether whether testing was successful or not.
Args:
preupdate_fails (set): List of tuples of integrations that failed the "Test" button prior to content
being updated on the demisto instance where each tuple is comprised of the integration name and the
name of the instance that was configured for that integration which failed.
postupdate_fails (set): List of tuples of integrations that failed the "Test" button after content was
updated on the demisto instance where each tuple is comprised of the integration name and the name
of the instance that was configured for that integration which failed.
preupdate_success (set): List of tuples of integrations that succeeded the "Test" button prior to content
being updated on the demisto instance where each tuple is comprised of the integration name and the
            name of the instance that was configured for that integration which succeeded.
postupdate_success (set): List of tuples of integrations that succeeded the "Test" button after content was
updated on the demisto instance where each tuple is comprised of the integration name and the name
            of the instance that was configured for that integration which succeeded.
new_integrations_names (list): List of the names of integrations that are new since the last official
content release and that will only be present on the demisto instance after the content update is
performed.
build: Build object
Returns:
(bool): False if there were integration instances that succeeded prior to the content update and then
failed after content was updated, otherwise True.
"""
testing_status = True
# a "Test" can be either successful both before and after content update(succeeded_pre_and_post variable),
# fail on one of them(mismatched_statuses variable), or on both(failed_pre_and_post variable)
succeeded_pre_and_post = preupdate_success.intersection(postupdate_success)
if succeeded_pre_and_post:
succeeded_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in succeeded_pre_and_post])
logging.success(
'Integration instances that had ("Test" Button) succeeded both before and after the content update:\n'
f'{succeeded_pre_and_post_string}')
failed_pre_and_post = preupdate_fails.intersection(postupdate_fails)
mismatched_statuses = postupdate_fails - preupdate_fails
failed_only_after_update = []
failed_but_is_new = []
for instance_name, integration_of_instance in mismatched_statuses:
if integration_of_instance in new_integrations_names:
failed_but_is_new.append((instance_name, integration_of_instance))
else:
failed_only_after_update.append((instance_name, integration_of_instance))
# warnings but won't fail the build step
if failed_but_is_new:
failed_but_is_new_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_but_is_new])
logging.warning(f'New Integrations ("Test" Button) Failures:\n{failed_but_is_new_string}')
if failed_pre_and_post:
failed_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_pre_and_post])
logging.warning(f'Integration instances that had ("Test" Button) failures '
f'both before and after the content update:\n{pformat(failed_pre_and_post_string)}')
# fail the step if there are instances that only failed after content was updated
if failed_only_after_update:
failed_only_after_update_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in failed_only_after_update])
testing_status = False
logging.critical('Integration instances that had ("Test" Button) failures only after content was updated:\n'
f'{pformat(failed_only_after_update_string)}.\n'
f'This indicates that your updates introduced breaking changes to the integration.')
else:
        # create this file to indicate that this instance passed the post-update tests
if build:
with open("./Tests/is_post_update_passed_{}.txt".format(build.ami_env.replace(' ', '')), 'a'):
pass
return testing_status
def get_env_conf():
if Build.run_environment == Running.CI_RUN:
return get_json_file(Build.env_results_path)
if Build.run_environment == Running.WITH_LOCAL_SERVER:
# START CHANGE ON LOCAL RUN #
return [{
"InstanceDNS": "http://localhost:8080",
"Role": "Server Master" # e.g. 'Server Master'
}]
if Build.run_environment == Running.WITH_OTHER_SERVER:
return [{
"InstanceDNS": "DNS NANE", # without http prefix
"Role": "DEMISTO EVN" # e.g. 'Server Master'
}]
# END CHANGE ON LOCAL RUN #
return None
def map_server_to_port(env_results, instance_role):
"""
Arguments:
env_results: (dict)
env_results.json in server
instance_role: (str)
The amazon machine image environment whose IP we should connect to.
Returns:
        (dict): A mapping of each server's internal IP to its SSH tunnel port.
"""
ip_to_port_map = {env.get('InstanceDNS'): env.get('TunnelPort') for env in env_results if
instance_role in env.get('Role', '')}
return ip_to_port_map
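# Illustrative (made-up) env_results entry and the resulting mapping:
#   env_results = [{'InstanceDNS': '10.0.0.1', 'TunnelPort': 4445, 'Role': 'Server Master'}]
#   map_server_to_port(env_results, 'Server Master')  # -> {'10.0.0.1': 4445}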
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def configure_servers_and_restart(build):
manual_restart = Build.run_environment == Running.WITH_LOCAL_SERVER
for server in build.servers:
configurations = dict()
configure_types = []
if is_redhat_instance(server.internal_ip):
configurations.update(DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN)
configurations.update(NO_PROXY_CONFIG)
configurations['python.pass.extra.keys'] += "##--network=slirp4netns:cidr=192.168.0.0/16"
else:
configurations.update(DOCKER_HARDENING_CONFIGURATION)
configure_types.append('docker hardening')
configure_types.append('marketplace')
configurations.update(MARKET_PLACE_CONFIGURATION)
error_msg = 'failed to set {} configurations'.format(' and '.join(configure_types))
server.add_server_configuration(configurations, error_msg=error_msg, restart=not manual_restart)
if manual_restart:
input('restart your server and then press enter.')
else:
logging.info('Done restarting servers. Sleeping for 1 minute')
sleep(60)
def get_tests(build: Build) -> List[dict]:
"""
    Selects the tests that should be run in this execution and filters out those that cannot run on this server version.
Args:
build: Build object
Returns:
Test configurations from conf.json that should be run in this execution
"""
server_numeric_version: str = build.server_numeric_version
tests: dict = build.tests
if Build.run_environment == Running.CI_RUN:
filtered_tests = BuildContext._extract_filtered_tests()
if build.is_nightly:
# skip test button testing
logging.debug('Not running instance tests in nightly flow')
tests_for_iteration = []
else:
tests_for_iteration = [test for test in tests
if not filtered_tests or test.get('playbookID', '') in filtered_tests]
tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version)
return tests_for_iteration
# START CHANGE ON LOCAL RUN #
return [
{
"playbookID": "Docker Hardening Test",
"fromversion": "5.0.0"
},
{
"integrations": "SplunkPy",
"playbookID": "SplunkPy-Test-V2",
"memory_threshold": 500,
"instance_names": "use_default_handler"
}
]
# END CHANGE ON LOCAL RUN #
def get_changed_integrations(build: Build) -> tuple:
"""
Return 2 lists - list of new integrations and list of modified integrations since the commit of the git_sha1.
Args:
build: the build object
Returns:
list of new integrations and list of modified integrations
"""
new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(
build.branch_name) if not build.is_private else ([], [])
new_integrations_names, modified_integrations_names = [], []
if new_integrations_files:
new_integrations_names = get_integration_names_from_files(new_integrations_files)
logging.debug(f'New Integrations Since Last Release:\n{new_integrations_names}')
if modified_integrations_files:
modified_integrations_names = get_integration_names_from_files(modified_integrations_files)
logging.debug(f'Updated Integrations Since Last Release:\n{modified_integrations_names}')
return new_integrations_names, modified_integrations_names
def get_pack_ids_to_install():
if Build.run_environment == Running.CI_RUN:
with open('./artifacts/content_packs_to_install.txt', 'r') as packs_stream:
pack_ids = packs_stream.readlines()
return [pack_id.rstrip('\n') for pack_id in pack_ids]
else:
# START CHANGE ON LOCAL RUN #
return [
'SplunkPy'
]
# END CHANGE ON LOCAL RUN #
def nightly_install_packs(build, install_method=None, pack_path=None, service_account=None):
threads_list = []
if not install_method:
raise Exception('Install method was not provided.')
# For each server url we install pack/ packs
for server in build.servers:
kwargs = {'client': server.client, 'host': server.internal_ip}
if service_account:
kwargs['service_account'] = service_account
if pack_path:
kwargs['pack_path'] = pack_path
threads_list.append(Thread(target=install_method, kwargs=kwargs))
run_threads_list(threads_list)
def install_nightly_pack(build):
nightly_install_packs(build, install_method=install_all_content_packs_for_nightly,
service_account=build.service_account)
create_nightly_test_pack()
nightly_install_packs(build, install_method=upload_zipped_packs,
pack_path=f'{Build.test_pack_target}/test_pack.zip')
logging.info('Sleeping for 45 seconds while installing nightly packs')
sleep(45)
def install_packs(build, pack_ids=None):
pack_ids = get_pack_ids_to_install() if pack_ids is None else pack_ids
installed_content_packs_successfully = True
for server in build.servers:
try:
_, flag = search_and_install_packs_and_their_dependencies(pack_ids, server.client)
if not flag:
raise Exception('Failed to search and install packs.')
except Exception:
logging.exception('Failed to search and install packs')
installed_content_packs_successfully = False
return installed_content_packs_successfully
def configure_server_instances(build: Build, tests_for_iteration, all_new_integrations, modified_integrations):
modified_module_instances = []
new_module_instances = []
testing_client = build.servers[0].client
for test in tests_for_iteration:
integrations = get_integrations_for_test(test, build.skipped_integrations_conf)
playbook_id = test.get('playbookID')
        # Use a distinct name for the per-test grouping so the 'modified_integrations'
        # parameter (a list of names) isn't clobbered between loop iterations.
        new_integrations, modified_integrations_for_test, unchanged_integrations, integration_to_status = group_integrations(
            integrations, build.skipped_integrations_conf, all_new_integrations, modified_integrations
        )
integration_to_status_string = '\n\t\t\t\t\t\t'.join(
[f'"{key}" - {val}' for key, val in integration_to_status.items()])
if integration_to_status_string:
logging.info(f'All Integrations for test "{playbook_id}":\n\t\t\t\t\t\t{integration_to_status_string}')
else:
logging.info(f'No Integrations for test "{playbook_id}"')
instance_names_conf = test.get('instance_names', [])
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf]
        integrations_to_configure = modified_integrations_for_test[:]
integrations_to_configure.extend(unchanged_integrations)
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
new_ints_params_set = set_integration_params(build,
new_integrations,
build.secret_conf['integrations'],
instance_names_conf,
placeholders_map)
ints_to_configure_params_set = set_integration_params(build,
integrations_to_configure,
build.secret_conf['integrations'],
instance_names_conf, placeholders_map)
if not new_ints_params_set:
logging.error(f'failed setting parameters for integrations: {new_integrations}')
if not ints_to_configure_params_set:
logging.error(f'failed setting parameters for integrations: {integrations_to_configure}')
if not (new_ints_params_set and ints_to_configure_params_set):
continue
modified_module_instances_for_test, new_module_instances_for_test = configure_modified_and_new_integrations(
build,
integrations_to_configure,
new_integrations,
testing_client)
modified_module_instances.extend(modified_module_instances_for_test)
new_module_instances.extend(new_module_instances_for_test)
return modified_module_instances, new_module_instances
def configure_modified_and_new_integrations(build: Build,
modified_integrations_to_configure: list,
new_integrations_to_configure: list,
demisto_client_: demisto_client) -> tuple:
"""
Configures old and new integrations in the server configured in the demisto_client.
Args:
build: The build object
        modified_integrations_to_configure: Integrations to configure that already exist
        new_integrations_to_configure: Integrations to configure that were created in this build
        demisto_client_: A demisto client
Returns:
A tuple with two lists:
1. List of configured instances of modified integrations
2. List of configured instances of new integrations
"""
modified_modules_instances = []
new_modules_instances = []
for integration in modified_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client_, placeholders_map)
if module_instance:
modified_modules_instances.append(module_instance)
for integration in new_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client_, placeholders_map)
if module_instance:
new_modules_instances.append(module_instance)
return modified_modules_instances, new_modules_instances
def instance_testing(build: Build,
all_module_instances: list,
pre_update: bool,
use_mock: bool = True,
first_call: bool = True) -> Tuple[set, set]:
"""
Runs 'test-module' command for the instances detailed in `all_module_instances`
Args:
build: An object containing the current build info.
all_module_instances: The integration instances that should be tested
pre_update: Whether this instance testing is before or after the content update on the server.
use_mock: Whether to use mock while testing mockable integrations. Should be used mainly with
private content build which aren't using the mocks.
        first_call: Indicates whether this is the first time the function is called from the same place
Returns:
A set of the successful tests containing the instance name and the integration name
A set of the failed tests containing the instance name and the integration name
"""
update_status = 'Pre' if pre_update else 'Post'
failed_tests = set()
successful_tests = set()
# Test all module instances (of modified + unchanged integrations) pre-updating content
if all_module_instances:
# only print start message if there are instances to configure
logging.info(f'Start of Instance Testing ("Test" button) ({update_status}-update)')
else:
logging.info(f'No integrations to configure for the chosen tests. ({update_status}-update)')
failed_instances = []
for instance in all_module_instances:
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
# If there is a failure, __test_integration_instance will print it
if integration_of_instance not in build.unmockable_integrations and use_mock:
success = test_integration_with_mock(build, instance, pre_update)
else:
testing_client = build.servers[0].reconnect_client()
success, _ = __test_integration_instance(testing_client, instance)
if not success:
failed_tests.add((instance_name, integration_of_instance))
failed_instances.append(instance)
else:
successful_tests.add((instance_name, integration_of_instance))
    # in case some tests failed post-update, wait 15 seconds, then run the failed tests again
if failed_instances and not pre_update and first_call:
logging.info("some post-update tests failed, sleeping for 15 seconds, then running the failed tests again")
sleep(15)
_, failed_tests = instance_testing(build, failed_instances, pre_update=False, first_call=False)
return successful_tests, failed_tests
def test_integration_with_mock(build: Build, instance: dict, pre_update: bool):
"""
Runs 'test-module' for given integration with mitmproxy
    In case the playback mode fails and this is a post-update run - a record attempt will be executed.
Args:
build: An object containing the current build info.
instance: A dict containing the instance details
pre_update: Whether this instance testing is before or after the content update on the server.
Returns:
The result of running the 'test-module' command for the given integration.
        If a record was executed - will return the result of the 'test-module' with the record mode only.
"""
testing_client = build.servers[0].reconnect_client()
integration_of_instance = instance.get('brand', '')
logging.debug(f'Integration "{integration_of_instance}" is mockable, running test-module with mitmproxy')
has_mock_file = build.proxy.has_mock_file(integration_of_instance)
success = False
if has_mock_file:
with run_with_mock(build.proxy, integration_of_instance) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.warning(f'Running test-module for "{integration_of_instance}" has failed in playback mode')
if not success and not pre_update:
logging.debug(f'Recording a mock file for integration "{integration_of_instance}".')
with run_with_mock(build.proxy, integration_of_instance, record=True) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.debug(f'Record mode for integration "{integration_of_instance}" has failed.')
return success
def update_content_till_v6(build: Build):
threads_list = []
# For each server url we install content
for server in build.servers:
t = Thread(target=update_content_on_demisto_instance,
kwargs={'client': server.client, 'server': server.internal_ip, 'ami_name': build.ami_env})
threads_list.append(t)
run_threads_list(threads_list)
def disable_instances(build: Build):
for server in build.servers:
disable_all_integrations(server.client)
def create_nightly_test_pack():
test_pack_zip(Build.content_path, Build.test_pack_target)
def test_files(content_path):
packs_root = f'{content_path}/Packs'
packs = filter(lambda x: x.is_dir(), os.scandir(packs_root))
for pack_dir in packs:
        if pack_dir.name in SKIPPED_PACKS:  # compare the directory name, not the DirEntry object
continue
playbooks_root = f'{pack_dir.path}/TestPlaybooks'
if os.path.isdir(playbooks_root):
for playbook_path, playbook in get_test_playbooks_in_dir(playbooks_root):
yield playbook_path, playbook
if os.path.isdir(f'{playbooks_root}/NonCircleTests'):
for playbook_path, playbook in get_test_playbooks_in_dir(f'{playbooks_root}/NonCircleTests'):
yield playbook_path, playbook
def get_test_playbooks_in_dir(path):
playbooks = filter(lambda x: x.is_file(), os.scandir(path))
for playbook in playbooks:
yield playbook.path, playbook
def test_pack_metadata():
now = datetime.now().isoformat().split('.')[0]
now = f'{now}Z'
metadata = {
"name": "nightly test",
"id": str(uuid.uuid4()),
"description": "nightly test pack (all test playbooks and scripts).",
"created": now,
"updated": now,
"legacy": True,
"support": "Cortex XSOAR",
"supportDetails": {},
"author": "Cortex XSOAR",
"authorImage": "",
"certification": "certified",
"price": 0,
"serverMinVersion": "6.0.0",
"serverLicense": "",
"currentVersion": "1.0.0",
"general": [],
"tags": [],
"categories": [
"Forensics & Malware Analysis"
],
"contentItems": {},
"integrations": [],
"useCases": [],
"keywords": [],
"dependencies": {}
}
return json.dumps(metadata, indent=4)
def test_pack_zip(content_path, target):
with zipfile.ZipFile(f'{target}/test_pack.zip', 'w', zipfile.ZIP_DEFLATED) as zip_file:
zip_file.writestr('test_pack/metadata.json', test_pack_metadata())
for test_path, test in test_files(content_path):
if not test_path.endswith('.yml'):
continue
test = test.name
with open(test_path, 'r') as test_file:
if not (test.startswith('playbook-') or test.startswith('script-')):
test_type = find_type(_dict=yaml.safe_load(test_file), file_type='yml').value
test_file.seek(0)
test_target = f'test_pack/TestPlaybooks/{test_type}-{test}'
else:
test_target = f'test_pack/TestPlaybooks/{test}'
zip_file.writestr(test_target, test_file.read())
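# Resulting zip layout (illustrative file names):
#   test_pack/metadata.json
#   test_pack/TestPlaybooks/playbook-SomeTest.yml   (already-prefixed files kept as-is)
#   test_pack/TestPlaybooks/<file_type>-SomeTest.yml   (prefix derived via find_type otherwise)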
def get_non_added_packs_ids(build: Build):
"""
:param build: the build object
:return: all non added packs i.e. unchanged packs (dependencies) and modified packs
"""
    compare_against = 'origin/master{}'.format('~1' if build.branch_name == 'master' else '')
added_files = run_command(f'git diff --name-only --diff-filter=A '
f'{compare_against}..refs/heads/{build.branch_name} -- Packs/*/pack_metadata.json')
if os.getenv('CONTRIB_BRANCH'):
added_contrib_files = run_command(
'git status -uall --porcelain -- Packs/*/pack_metadata.json | grep "?? "').replace('?? ', '')
added_files = added_files if not added_contrib_files else '\n'.join([added_files, added_contrib_files])
added_files = filter(lambda x: x, added_files.split('\n'))
added_pack_ids = map(lambda x: x.split('/')[1], added_files)
return set(get_pack_ids_to_install()) - set(added_pack_ids)
def set_marketplace_url(servers, branch_name, ci_build_number):
url_suffix = quote_plus(f'{branch_name}/{ci_build_number}/xsoar')
config_path = 'marketplace.bootstrap.bypass.url'
config = {config_path: f'https://storage.googleapis.com/marketplace-ci-build/content/builds/{url_suffix}'}
for server in servers:
server.add_server_configuration(config, 'failed to configure marketplace custom url ', True)
logging.success('Updated marketplace url and restarted servers')
logging.info('sleeping for 60 seconds')
sleep(60)
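# Example of the resulting configuration value (hypothetical branch and build
# number); note that quote_plus() percent-encodes the slashes in the suffix:
#   set_marketplace_url(servers, 'my-branch', '1234') sets
#   marketplace.bootstrap.bypass.url to
#   https://storage.googleapis.com/marketplace-ci-build/content/builds/my-branch%2F1234%2Fxsoar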
@run_with_proxy_configured
def test_integrations_post_update(build: Build, new_module_instances: list, modified_module_instances: list) -> tuple:
"""
Runs 'test-module on all integrations for post-update check
Args:
build: A build object
new_module_instances: A list containing new integrations instances to run test-module on
modified_module_instances: A list containing old (existing) integrations instances to run test-module on
Returns:
* A list of integration names that have failed the 'test-module' execution post update
* A list of integration names that have succeeded the 'test-module' execution post update
"""
modified_module_instances.extend(new_module_instances)
successful_tests_post, failed_tests_post = instance_testing(build, modified_module_instances, pre_update=False)
return successful_tests_post, failed_tests_post
def update_content_on_servers(build: Build) -> bool:
"""
Updates content on the build's server according to the server version
Args:
build: Build object
Returns:
A boolean that indicates whether the content installation was successful.
        If the server version is lower than 6.0.0 - will return the 'installed_content_packs_successfully' parameter as is.
        If the server version is higher than or equal to 6.0.0 - will return True if the packs installation was successful
        both before the update and after the update.
"""
installed_content_packs_successfully = True
if LooseVersion(build.server_numeric_version) < LooseVersion('6.0.0'):
update_content_till_v6(build)
elif not build.is_nightly:
set_marketplace_url(build.servers, build.branch_name, build.ci_build_number)
installed_content_packs_successfully = install_packs(build)
return installed_content_packs_successfully
@run_with_proxy_configured
def configure_and_test_integrations_pre_update(build: Build, new_integrations, modified_integrations) -> tuple:
"""
Configures integration instances that exist in the current version and for each integration runs 'test-module'.
Args:
build: Build object
new_integrations: A list containing new integrations names
modified_integrations: A list containing modified integrations names
Returns:
A tuple consists of:
* A list of modified module instances configured
* A list of new module instances configured
* A list of integrations that have failed the 'test-module' command execution
* A list of integrations that have succeeded the 'test-module' command execution
* A list of new integrations names
"""
tests_for_iteration = get_tests(build)
modified_module_instances, new_module_instances = configure_server_instances(build,
tests_for_iteration,
new_integrations,
modified_integrations)
successful_tests_pre, failed_tests_pre = instance_testing(build, modified_module_instances, pre_update=True)
return modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre
def install_packs_pre_update(build: Build) -> bool:
"""
Install packs on server according to server version
Args:
build: A build object
Returns:
A boolean that indicates whether the installation was successful or not
"""
installed_content_packs_successfully = False
if LooseVersion(build.server_numeric_version) >= LooseVersion('6.0.0'):
if build.is_nightly:
install_nightly_pack(build)
installed_content_packs_successfully = True
else:
if not build.is_private:
pack_ids = get_non_added_packs_ids(build)
installed_content_packs_successfully = install_packs(build, pack_ids=pack_ids)
else:
installed_content_packs_successfully = True
return installed_content_packs_successfully
def main():
install_logging('Install_Content_And_Configure_Integrations_On_Server.log', logger=logging)
build = Build(options_handler())
logging.info(f"Build Number: {build.ci_build_number}")
configure_servers_and_restart(build)
disable_instances(build)
install_packs_pre_update(build)
new_integrations, modified_integrations = get_changed_integrations(build)
pre_update_configuration_results = configure_and_test_integrations_pre_update(build,
new_integrations,
modified_integrations)
modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre = pre_update_configuration_results
installed_content_packs_successfully = update_content_on_servers(build)
successful_tests_post, failed_tests_post = test_integrations_post_update(build,
new_module_instances,
modified_module_instances)
success = report_tests_status(failed_tests_pre, failed_tests_post, successful_tests_pre, successful_tests_post,
new_integrations, build)
if not success or not installed_content_packs_successfully:
sys.exit(2)
if __name__ == '__main__':
main()
|
test_capture.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import io
import os
import pickle
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
import py
from six import text_type
import pytest
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.compat import _PY3
from _pytest.main import EXIT_NOTESTSCOLLECTED
# note: py.io capture tests where copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
needsosdup = pytest.mark.skipif(
not hasattr(os, "dup"), reason="test needs os.dup, not available on this platform"
)
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
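# Minimal usage sketch for the two helpers above (kept as a comment so nothing
# runs at import time; mirrors how TestStdCapture.getcapture drives a capture
# object further down):
#   cap = StdCapture()
#   cap.start_capturing()
#   print("hello")
#   out, err = cap.readouterr()   # -> ("hello\n", "")
#   cap.stop_capturing()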
class TestCaptureManager(object):
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config.argparsing import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == ("fd" if hasattr(os, "dup") else "sys")
parser = Parser()
monkeypatch.delattr(os, "dup", raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@pytest.mark.parametrize(
"method", ["no", "sys", pytest.param("fd", marks=needsosdup)]
)
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
pytest.xfail("does not work on pypy < 2.2")
if sys.version_info >= (3, 0):
obj = "'b\u00f6y'"
else:
obj = "u'\u00f6y'"
testdir.makepyfile(
"""
# coding=utf8
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
print("collect %s failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*Captured stdout*", "*collect 13 failure*"])
class TestPerTestCapturing(object):
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction(object):
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture(object):
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 error*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"*capsys*capfd*same*time*",
"*test_two*",
"*capfd*capsys*same*time*",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, "42".encode('ascii'))
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
@needsosdup
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
sys.version_info < (3,), reason="only have capsysbinary in python 3"
)
def test_capsysbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsysbinary):
import sys
# some likely un-decodable bytes
sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
out, err = capsysbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
sys.version_info >= (3,), reason="only have capsysbinary in python 3"
)
def test_capsysbinary_forbidden_in_python2(self, testdir):
testdir.makepyfile(
"""\
def test_hello(capsysbinary):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_hello*",
"*capsysbinary is only supported on python 3*",
"*1 error in*",
]
)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
@pytest.mark.issue14
def test_capture_and_logging(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(
"""
*while capture is disabled*
"""
)
assert "captured before" not in result.stdout.str()
assert "captured after" not in result.stdout.str()
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
assert "test_normal executed" not in result.stdout.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile(
"""\
from __future__ import print_function
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("*1 passed*")
assert "stdout contents begin" not in result.stdout.str()
assert "stderr contents begin" not in result.stdout.str()
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile(
"""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
"""
)
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert "hello19" not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--cap=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess(
"-p", "pytest_xyz", "--version", syspathinsert=True
)
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO(object):
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
if sys.version_info >= (3, 0):
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
else:
f.write(u"\u00f6")
f.write(b"hello")
s = f.getvalue()
f.close()
assert isinstance(s, text_type)
@pytest.mark.skipif(sys.version_info[0] == 2, reason="python 3 only behaviour")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
iter_f = iter(f)
pytest.raises(IOError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.mark.skipif("sys.version_info < (3,)", reason="python2 has no buffer")
def test_dontreadfrominput_buffer_python3():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
fb = f.buffer
assert not fb.isatty()
pytest.raises(IOError, fb.read)
pytest.raises(IOError, fb.readlines)
iter_f = iter(f)
pytest.raises(IOError, next, iter_f)
pytest.raises(ValueError, fb.fileno)
f.close() # just for completeness
@pytest.mark.skipif("sys.version_info >= (3,)", reason="python2 has no buffer")
def test_dontreadfrominput_buffer_python2():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
with pytest.raises(AttributeError):
f.buffer
f.close() # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print(i, end="", file=nf)
flist.append(nf)
fname_open = flist[0].name
assert fname_open == repr(flist[0].buffer)
for i in range(5):
f = flist[i]
f.close()
fname_closed = flist[0].name
assert fname_closed == repr(flist[0].buffer)
assert fname_closed != fname_open
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
bio = io.BytesIO()
f = capture.safe_text_dupfile(bio, "wb")
f.write("hello")
assert bio.getvalue() == b"hello"
assert "BytesIO object" in f.name
def test_dupfile_on_textio():
tio = py.io.TextIO()
f = capture.safe_text_dupfile(tio, "wb")
f.write("hello")
assert tio.getvalue() == "hello"
assert not hasattr(f, "name")
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError):
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof'")
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
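# lsof_check (above) snapshots the process's open regular files (lines
# containing "REG" in lsof output) before and after the wrapped block and
# asserts the count grew by less than 3; the FD-capture tests below use it
# to detect leaked file descriptors.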
class TestFDCapture(object):
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self, tmpfile):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self, tmpfile):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
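# saved_fd duplicates the given fd up front and restores it with dup2 on exit,
# so tests such as test_simple_resume_suspend above can redirect fd 1 without
# leaking the redirection into later tests. The equivalent inline pattern:
#   backup = os.dup(1)
#   try:
#       ...  # anything that replaces fd 1
#   finally:
#       os.dup2(backup, 1)
#       os.close(backup)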
class TestStdCapture(object):
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == u"hxąć\n"
@pytest.mark.skipif(
"sys.version_info >= (3,)", reason="text output different for bytes on python3"
)
def test_capturing_readouterr_decode_error_handling(self):
with self.getcapture() as cap:
# triggered an internal error in pytest
print("\xa6")
out, err = cap.readouterr()
assert out == u"\ufffd\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, sys.stdin.read)
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""
import os
def test_x():
os.write(1, "hello\\n".encode("ascii"))
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD(object):
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_,
Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert repr(cap.out) == "<FDCapture 1 oldfd=None>"
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert repr(cap.err) == "<FDCapture 2 oldfd=None>"
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert repr(cap.in_) == "<FDCapture 0 oldfd=None>"
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@needsosdup
@pytest.mark.parametrize("use", [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize("method", ["SysCapture", "FDCapture"])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, "dup"):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6),
reason="only py3.6+ on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream(object):
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
from __future__ import print_function
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
assert "IOError" not in result.stdout.str()
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, capture, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog", "r") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout", "r") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
if _PY3:
result_with_capture.stdout.fnmatch_lines(
["E TypeError: write() argument must be str, not bytes"]
)
else:
assert result_with_capture.ret == 0
|
__init__.py
|
# vim: sw=4:ts=4:et
#
# ACE Collectors
# These objects collect things for remote ACE nodes to analyze.
#
import logging
import os, os.path
import pickle
import shutil
import socket
import sys
import threading
import uuid
import ace_api
import saq
from saq.database import use_db, \
execute_with_retry, \
get_db_connection, \
enable_cached_db_connections, \
disable_cached_db_connections
from saq.error import report_exception
import urllib3.exceptions
import requests.exceptions
# some constants used as return values
WORK_SUBMITTED = 1
NO_WORK_AVAILABLE = 2
NO_NODES_AVAILABLE = 3
NO_WORK_SUBMITTED = 4
# test modes
TEST_MODE_STARTUP = 'startup'
TEST_MODE_SINGLE_SUBMISSION = 'single_submission'
class Submission(object):
"""A single analysis submission.
Keep in mind that this object gets serialized into a database blob via the pickle module.
NOTE - The files parameter MUST be either a list of file names or a list of tuples of (source, dest)
NOT file descriptors."""
# this is basically just all the arguments that are passed to ace_api.submit
def __init__(self,
description,
analysis_mode,
tool,
tool_instance,
type,
event_time,
details,
observables,
tags,
files,
group_assignments=[]):
self.description = description
self.analysis_mode = analysis_mode
self.tool = tool
self.tool_instance = tool_instance
self.type = type
self.event_time = event_time
self.details = details
self.observables = observables
self.tags = tags
self.files = files
self.uuid = str(uuid.uuid4())
# list of RemoteNodeGroup.name values
# empty list means send to all configured groups
self.group_assignments = group_assignments
def __str__(self):
return "{} ({})".format(self.description, self.analysis_mode)
def success(self, result):
"""Called by the RemoteNodeGroup when this has been successfully submitted to a remote node.
result is the result of the ace_api.submit command for the submission
By default this deletes any files submitted."""
pass
def fail(self):
"""Called by the RemoteNodeGroup when this has failed to be submitted and full_delivery is disabled.
By default this deletes any files submitted."""
pass
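# Example (hypothetical values) of constructing a Submission. Per the class
# docstring, 'files' must hold file names or (source, dest) tuples, never open
# file descriptors, because the object is pickled into a database blob:
#   submission = Submission(
#       description='suspicious email',
#       analysis_mode='email',
#       tool='my_collector',
#       tool_instance='collector.example.com',
#       type='mailbox',
#       event_time=None,
#       details={},
#       observables=[],
#       tags=[],
#       files=['/tmp/message.eml', ('/tmp/attachment.bin', 'attachment.bin')])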
class RemoteNode(object):
def __init__(self, id, name, location, any_mode, last_update, analysis_mode, workload_count):
self.id = id
self.name = name
self.location = location
self.any_mode = any_mode
self.last_update = last_update
self.analysis_mode = analysis_mode
self.workload_count = workload_count
# the directory that contains any files that are to be transferred along with submissions
self.incoming_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['collection']['incoming_dir'])
# apply any node translations that need to take effect
for key in saq.CONFIG['node_translation'].keys():
src, target = saq.CONFIG['node_translation'][key].split(',')
if self.location == src:
logging.debug("translating node {} to {}".format(self.location, target))
self.location = target
break
def __str__(self):
return "RemoteNode(id={},name={},location={})".format(self.id, self.name, self.location)
def submit(self, submission):
"""Attempts to submit the given Submission to this node."""
assert isinstance(submission, Submission)
# we need to convert the list of files to what is expected by the ace_api.submit function
_files = []
for f in submission.files:
if isinstance(f, tuple):
src_path, dest_name = f
_files.append((dest_name, open(os.path.join(self.incoming_dir, submission.uuid, os.path.basename(src_path)), 'rb')))
else:
_files.append((os.path.basename(f), open(os.path.join(self.incoming_dir, submission.uuid, os.path.basename(f)), 'rb')))
#files = [ (os.path.basename(f), open(os.path.join(self.incoming_dir, submission.uuid, os.path.basename(f)), 'rb')) for f in submission.files]
result = ace_api.submit(
submission.description,
remote_host=self.location,
ssl_verification=saq.CONFIG['SSL']['ca_chain_path'],
analysis_mode=submission.analysis_mode,
tool=submission.tool,
tool_instance=submission.tool_instance,
type=submission.type,
event_time=submission.event_time,
details=submission.details,
observables=submission.observables,
tags=submission.tags,
files=_files)
try:
result = result['result']
logging.info("submit remote {} submission {} uuid {}".format(self.location, submission, result['uuid']))
except Exception as e:
logging.warning("submission irregularity for {}: {}".format(submission, e))
# clean up our file descriptors
for name, fp in _files:
try:
fp.close()
except Exception as e:
logging.error("unable to close file descriptor for {}: {}".format(name, e))
return result
class RemoteNodeGroup(object):
"""Represents a collection of one or more RemoteNode objects that share the
same group configuration property."""
def __init__(self, name, coverage, full_delivery, company_id, database, group_id, workload_type_id, batch_size=32):
assert isinstance(name, str) and name
assert isinstance(coverage, int) and coverage > 0 and coverage <= 100
assert isinstance(full_delivery, bool)
assert isinstance(company_id, int)
assert isinstance(database, str)
assert isinstance(group_id, int)
assert isinstance(workload_type_id, int)
self.name = name
# this is the percentage of submissions that are actually sent to this node group
self.coverage = coverage
self.coverage_counter = 0
# if full_delivery is True then all submissions assigned to the group will eventually be submitted
# if set to False then at least one attempt is made to submit
# setting to False is useful for QA and development type systems
self.full_delivery = full_delivery
# the company this node group belongs to
self.company_id = company_id
# the name of the database to query for node status
self.database = database
# the id of this group in the work_distribution_groups table
self.group_id = group_id
# the type of work that this collector works with
self.workload_type_id = workload_type_id
# the (maximum) number of work items to pull at once from the database
self.batch_size = batch_size
# metrics
self.assigned_count = 0 # how many emails were assigned to this group
self.skipped_count = 0 # how many emails have been skipped due to coverage rules
self.delivery_failures = 0 # how many emails failed to deliver when full_delivery is disabled
# main thread of execution for this group
self.thread = None
# set this to True to gracefully shut down the group
self.shutdown_event = threading.Event()
# when do we think a node has gone offline
# each node (engine) should update its status every [engine][node_status_update_frequency] seconds
# so we wait for twice that long until we think a node is offline
# at which point we no longer consider it for submissions
self.node_status_update_frequency = saq.CONFIG['engine'].getint('node_status_update_frequency')
# the directory that contains any files that are to be transferred along with submissions
self.incoming_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['collection']['incoming_dir'])
def start(self):
self.shutdown_event.clear()
# main thread of execution for this group
self.thread = threading.Thread(target=self.loop, name="RemoteNodeGroup {}".format(self.name))
self.thread.start()
def stop(self):
self.shutdown_event.set()
def wait(self):
self.thread.join()
def loop(self):
enable_cached_db_connections()
while True:
try:
result = self.execute()
# if we did something then we immediately look for more work unless we're shutting down
if result == WORK_SUBMITTED:
if self.shutdown_event.is_set():
break
# if there was no work available to be submitted then wait a second and look again
elif result == NO_WORK_AVAILABLE:
if self.shutdown_event.wait(1):
break
# if there were no NODES available then wait a little while longer and look again
elif result == NO_NODES_AVAILABLE:
if self.shutdown_event.wait(self.node_status_update_frequency / 2):
break
elif result == NO_WORK_SUBMITTED:
if self.shutdown_event.wait(1):
break
except Exception as e:
logging.error("unexpected exception thrown in loop for {}: {}".format(self, e))
report_exception()
if self.shutdown_event.wait(1):
break
disable_cached_db_connections()
@use_db
def execute(self, db, c):
# first we get a list of all the distinct analysis modes available in the work queue
c.execute("""
SELECT DISTINCT(incoming_workload.mode)
FROM
incoming_workload JOIN work_distribution ON incoming_workload.id = work_distribution.work_id
WHERE
incoming_workload.type_id = %s
AND work_distribution.group_id = %s
AND work_distribution.status = 'READY'
""", (self.workload_type_id, self.group_id,))
available_modes = c.fetchall()
db.commit()
# if we get nothing from this query then no work is available for this group
if not available_modes:
logging.debug("no work available for {}".format(self))
return NO_WORK_AVAILABLE
# flatten this out to a list of analysis modes
available_modes = [_[0] for _ in available_modes]
# given this list of modes that need remote targets, see what is currently available
with get_db_connection(self.database) as node_db:
node_c = node_db.cursor()
sql = """
SELECT
nodes.id,
nodes.name,
nodes.location,
nodes.any_mode,
nodes.last_update,
node_modes.analysis_mode,
COUNT(workload.id) AS 'WORKLOAD_COUNT'
FROM
nodes LEFT JOIN node_modes ON nodes.id = node_modes.node_id
LEFT JOIN workload ON nodes.id = workload.node_id
WHERE
nodes.company_id = %s
AND nodes.is_local = 0
AND TIMESTAMPDIFF(SECOND, nodes.last_update, NOW()) <= %s
AND ( nodes.any_mode OR node_modes.analysis_mode in ( {} ) )
GROUP BY
nodes.id,
nodes.name,
nodes.location,
nodes.any_mode,
nodes.last_update,
node_modes.analysis_mode
ORDER BY
WORKLOAD_COUNT ASC,
nodes.last_update ASC
""".format(','.join(['%s' for _ in available_modes]))
params = [ self.company_id, self.node_status_update_frequency * 2 ]
params.extend(available_modes)
node_c.execute(sql, tuple(params))
node_status = node_c.fetchall()
if not node_status:
logging.debug("no remote nodes are avaiable for all analysis modes {} for {}".format(
','.join(available_modes), self))
return NO_NODES_AVAILABLE
# now figure out what analysis modes are actually available for processing
analysis_mode_mapping = {} # key = analysis_mode, value = [ RemoteNode ]
any_mode_nodes = [] # list of nodes with any_mode set to True
for node_id, name, location, any_mode, last_update, analysis_mode, workload_count in node_status:
remote_node = RemoteNode(node_id, name, location, any_mode, last_update, analysis_mode, workload_count)
if any_mode:
any_mode_nodes.append(remote_node)
if analysis_mode:
if analysis_mode not in analysis_mode_mapping:
analysis_mode_mapping[analysis_mode] = []
analysis_mode_mapping[analysis_mode].append(remote_node)
# now we trim our list of analysis modes down to what is available
# if we don't have a node that supports any mode
if not any_mode_nodes:
available_modes = [m for m in available_modes if m in analysis_mode_mapping.keys()]
logging.debug("available_modes = {} after checking available nodes".format(available_modes))
if not available_modes:
logging.debug("no nodes are available that support the available analysis modes")
return NO_NODES_AVAILABLE
# now we get the next things to submit from the database that have an analysis mode that is currently
# available to be submitted to
sql = """
SELECT
incoming_workload.id,
incoming_workload.mode,
incoming_workload.work
FROM
incoming_workload JOIN work_distribution ON incoming_workload.id = work_distribution.work_id
WHERE
incoming_workload.type_id = %s
AND work_distribution.group_id = %s
AND incoming_workload.mode IN ( {} )
AND work_distribution.status = 'READY'
ORDER BY
incoming_workload.id ASC
LIMIT %s""".format(','.join(['%s' for _ in available_modes]))
params = [ self.workload_type_id, self.group_id ]
params.extend(available_modes)
params.append(self.batch_size)
c.execute(sql, tuple(params))
work_batch = c.fetchall()
db.commit()
logging.info("submitting {} items".format(len(work_batch)))
# simple flag that gets set if ANY submission is successful
submission_success = False
# we should have a small list of things to submit to remote nodes for this group
for work_id, analysis_mode, submission_blob in work_batch:
# first make sure we can un-pickle this
try:
submission = pickle.loads(submission_blob)
except Exception as e:
execute_with_retry(db, c, """UPDATE work_distribution SET status = 'COMPLETED'
WHERE group_id = %s AND work_id = %s""",
(self.group_id, work_id), commit=True)
logging.error("unable to un-pickle submission blob for id {}: {}".format(work_id, e))
continue # nothing more we can do with a corrupt work item
# simple flag to remember if we failed to send
submission_failed = False
# the result of the submission (we pass to Submission.success later)
submission_result = None
self.coverage_counter += self.coverage
if self.coverage_counter < 100:
# we'll be skipping this one
logging.debug("skipping work id {} for group {} due to coverage constraints".format(
work_id, self.name))
else:
# otherwise we try to submit it
self.coverage_counter -= 100
# sort the list of RemoteNode objects by the workload_count
available_targets = any_mode_nodes[:]
if analysis_mode in analysis_mode_mapping:
available_targets.extend(analysis_mode_mapping[analysis_mode])
target = sorted(available_targets, key=lambda n: n.workload_count)
target = target[0]
# attempt the send
try:
submission_result = target.submit(submission)
logging.info("{} got submission result {} for {}".format(self, submission_result, submission))
submission_success = True
except Exception as e:
log_function = logging.warning
if self.full_delivery:
if not isinstance(e, urllib3.exceptions.MaxRetryError) \
and not isinstance(e, urllib3.exceptions.NewConnectionError) \
and not isinstance(e, requests.exceptions.ConnectionError):
# if it's not a connection issue then report it
report_exception()
log_function("unable to submit work item {} to {} via group {}: {}".format(
submission, target, self, e))
# if we are in full delivery mode then we need to try this one again later
if self.full_delivery:
continue
submission_failed = True
# at this point we either sent it or we tried and failed but that's OK
execute_with_retry(db, c, """UPDATE work_distribution SET status = 'COMPLETED'
WHERE group_id = %s AND work_id = %s""",
(self.group_id, work_id), commit=True)
# check to see if we're the last attempt on this work item
c.execute("""
SELECT
COUNT(*)
FROM
incoming_workload JOIN work_distribution ON incoming_workload.id = work_distribution.work_id
WHERE
incoming_workload.id = %s
AND work_distribution.status = 'READY'
""", (work_id,))
result = c.fetchone()
db.commit()
result = result[0]
if result == 0:
logging.debug("completed work item {}".format(submission))
execute_with_retry(db, c, "DELETE FROM incoming_workload WHERE id = %s", (work_id,), commit=True)
# give the collector a chance to do something with the
# submission BEFORE we delete the incoming directory for it
if submission_failed:
try:
submission.fail()
except Exception as e:
logging.error("call to {}.fail() failed: {}".format(submission, e))
report_exception()
else:
try:
submission.success(submission_result)
except Exception as e:
logging.error("call to {}.success() failed: {}".format(submission, e))
report_exception()
if submission.files:
try:
target_dir = os.path.join(self.incoming_dir, submission.uuid)
shutil.rmtree(target_dir)
logging.debug("deleted incoming dir {}".format(target_dir))
except Exception as e:
logging.error("unable to delete directory {}: {}".format(target_dir, e))
if submission_success:
return WORK_SUBMITTED
return NO_WORK_SUBMITTED
def __str__(self):
return "RemoteNodeGroup(name={}, coverage={}, full_delivery={}, company_id={}, database={})".format(
self.name, self.coverage, self.full_delivery, self.company_id, self.database)
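# Illustration of the coverage counter used in execute() above: every work
# item adds 'coverage' points, and a submission is only attempted once the
# counter reaches 100 (which then costs 100 points). With coverage=50 every
# other item is submitted; with coverage=100 every item is submitted:
#   counter = 0
#   for item in range(4):      # coverage = 50
#       counter += 50
#       if counter >= 100:
#           counter -= 100     # items 1 and 3 are submitted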
class Collector(object):
def __init__(self, workload_type, delete_files=False, test_mode=None, collection_frequency=1):
# often used as the "tool_instance" property of analysis
self.fqdn = socket.getfqdn()
# the type of work this collector collects
# this maps to incoming_workload_type.name in the database
self.workload_type = workload_type
# get the workload type_id from the database, or, add it if it does not already exist
try:
with get_db_connection() as db:
c = db.cursor()
c.execute("SELECT id FROM incoming_workload_type WHERE name = %s", (self.workload_type,))
row = c.fetchone()
if row is None:
c.execute("INSERT INTO incoming_workload_type ( name ) VALUES ( %s )", (self.workload_type,))
db.commit()
c.execute("SELECT id FROM incoming_workload_type WHERE name = %s", (self.workload_type,))
row = c.fetchone()
if row is None:
raise ValueError("unable to create workload type for {}".format(self.workload_type))
self.workload_type_id = row[0]
logging.debug("got workload type id {} for {}".format(self.workload_type_id, self.workload_type))
except Exception as e:
logging.critical("unable to get workload type_id from database: {}".format(self.workload_type))
raise e
# set this to True to gracefully shut down the collector
self.shutdown_event = threading.Event()
# the list of RemoteNodeGroup targets this collector will send to
self.remote_node_groups = []
# the directory that contains any files that are to be transferred along with submissions
self.incoming_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['collection']['incoming_dir'])
# the directory that can contain various forms of persistence for collections
self.persistence_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['collection']['persistence_dir'])
# if delete_files is True then any files copied for submission are deleted after being
# successfully added to the submission queue
# this is useful for collectors which are supposed to consume and clear the input
self.delete_files = delete_files
# test_mode gets set during unit testing
self.test_mode = test_mode
if self.test_mode is not None:
logging.info("*** COLLECTOR {} STARTED IN TEST MODE {} ***".format(self, self.test_mode))
# the total number of submissions sent to the RemoteNode objects (added to the incoming_workload table)
self.submission_count = 0
# how often to collect, defaults to 1 second
# NOTE there is no wait if something was previously collected
self.collection_frequency = collection_frequency
# create any required directories
for dir_path in [ self.incoming_dir, self.persistence_dir ]:
if not os.path.isdir(dir_path):
try:
logging.info("creating directory {}".format(dir_path))
os.makedirs(dir_path)
except Exception as e:
logging.critical("unable to create director {}: {}".format(dir_path, e))
sys.exit(1)
@use_db
def add_group(self, name, coverage, full_delivery, company_id, database, db, c):
c.execute("SELECT id FROM work_distribution_groups WHERE name = %s", (name,))
row = c.fetchone()
if row is None:
c.execute("INSERT INTO work_distribution_groups ( name ) VALUES ( %s )", (name,))
group_id = c.lastrowid
db.commit()
else:
group_id = row[0]
remote_node_group = RemoteNodeGroup(name, coverage, full_delivery, company_id, database, group_id, self.workload_type_id)
self.remote_node_groups.append(remote_node_group)
logging.info("added {}".format(remote_node_group))
return remote_node_group
def load_groups(self):
"""Loads groups from the ACE configuration file."""
for section in saq.CONFIG.keys():
if not section.startswith('collection_group_'):
continue
group_name = section[len('collection_group_'):]
coverage = saq.CONFIG[section].getint('coverage')
full_delivery = saq.CONFIG[section].getboolean('full_delivery')
company_id = saq.CONFIG[section].getint('company_id')
database = saq.CONFIG[section]['database']
logging.info("loaded group {} coverage {} full_delivery {} company_id {} database {}".format(
group_name, coverage, full_delivery, company_id, database))
self.add_group(group_name, coverage, full_delivery, company_id, database)
def start(self):
# you need to add at least one group to send to
if not self.remote_node_groups:
raise RuntimeError("no RemoteNodeGroup objects have been added to {}".format(self))
self.collection_thread = threading.Thread(target=self.loop, name="Collector")
self.collection_thread.start()
# start the node groups
for group in self.remote_node_groups:
group.start()
def initialize(self):
pass
def stop(self):
self.shutdown_event.set()
for group in self.remote_node_groups:
group.stop()
def wait(self):
logging.info("waiting for collection thread to terminate...")
self.collection_thread.join()
for group in self.remote_node_groups:
logging.info("waiting for {} thread to terminate...".format(group))
group.wait()
logging.info("collection ended")
def loop(self):
enable_cached_db_connections()
while True:
try:
self.execute()
except Exception as e:
logging.error("unexpected exception thrown during loop for {}: {}".format(self, e))
report_exception()
if self.shutdown_event.wait(1):
break
if self.shutdown_event.is_set():
break
disable_cached_db_connections()
@use_db
def execute(self, db, c):
if self.test_mode == TEST_MODE_STARTUP:
next_submission = None
elif self.test_mode == TEST_MODE_SINGLE_SUBMISSION and self.submission_count > 0:
next_submission = None
else:
next_submission = self.get_next_submission()
# did we not get anything to submit?
if next_submission is None:
# wait until we check again (defaults to 1 second, passed in on constructor)
self.shutdown_event.wait(self.collection_frequency)
return
if not isinstance(next_submission, Submission):
logging.critical("get_next_submission() must return an object derived from Submission")
# we COPY the files over to another directory for transfer
# we'll DELETE them later if we are able to copy them all and then insert the entry into the database
target_dir = None
if next_submission.files:
target_dir = os.path.join(self.incoming_dir, next_submission.uuid)
if os.path.exists(target_dir):
logging.error("target directory {} already exists".format(target_dir))
else:
try:
os.mkdir(target_dir)
for f in next_submission.files:
# this could be a tuple of (source_file, target_name)
if isinstance(f, tuple):
f = f[0]
target_path = os.path.join(target_dir, os.path.basename(f))
# TODO use hard links instead of copies to reduce I/O
shutil.copy2(f, target_path)
logging.debug("copied file from {} to {}".format(f, target_path))
except Exception as e:
logging.error("I/O error moving files into {}: {}".format(target_dir, e))
report_exception()
# we don't really need to change the file paths that are stored in the Submission object
# we just remember where we've moved them to (later)
try:
# add this as a workload item to the database queue
work_id = execute_with_retry(db, c, self.insert_workload, (next_submission,), commit=True)
assert isinstance(work_id, int)
logging.info("scheduled {} mode {}".format(next_submission.description, next_submission.analysis_mode))
except Exception as e:
# something went wrong -- delete our incoming directory if we created one
if target_dir:
try:
shutil.rmtree(target_dir)
except Exception as e:
logging.error("unable to delete directory {}: {}".format(target_dir, e))
raise  # bare raise: the inner "except ... as e" above unbinds e, so "raise e" here would fail
# all is well -- delete the files we've copied into our incoming directory
if self.delete_files:
for f in next_submission.files:
# this could be a tuple of (source_file, target_name)
if isinstance(f, tuple):
f = f[0]
try:
os.remove(f)
except Exception as e:
logging.error("unable to delete file {}: {}".format(f, e))
self.submission_count += 1
def insert_workload(self, db, c, next_submission):
c.execute("INSERT INTO incoming_workload ( type_id, mode, work ) VALUES ( %s, %s, %s )",
(self.workload_type_id, next_submission.analysis_mode, pickle.dumps(next_submission)))
if c.lastrowid is None:
raise RuntimeError("missing lastrowid for INSERT transaction")
work_id = c.lastrowid
# assign this work to each configured group
node_groups = self.remote_node_groups
# does this submission have a defined set of groups to send to?
if next_submission.group_assignments:
node_groups = [ng for ng in node_groups if ng.name in next_submission.group_assignments]
if not node_groups:
# default to all groups if we end up with an empty list
logging.error("group assignment {} does not map to any known groups".format(next_submission.group_assignments))
node_groups = self.remote_node_groups
for remote_node_group in node_groups:
c.execute("INSERT INTO work_distribution ( work_id, group_id ) VALUES ( %s, %s )",
(work_id, remote_node_group.group_id))
return work_id
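# For reference, a hedged sketch of the schema insert_workload assumes above.
# Table and column names come from the INSERT statements; the types are guesses:
#
#   CREATE TABLE incoming_workload (
#       id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
#       type_id INT NOT NULL,
#       mode VARCHAR(256),
#       work BLOB );                      -- the pickled Submission
#
#   CREATE TABLE work_distribution (
#       work_id INT NOT NULL,             -- references incoming_workload.id
#       group_id INT NOT NULL );          -- references the remote node group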
def get_next_submission(self):
"""Returns the next Submission object to be submitted to the remote nodes."""
raise NotImplementedError()
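# A minimal sketch of a concrete collector, kept as a comment because the name
# of the enclosing base class is not visible here. DirectoryCollector, the
# watch_dir argument and the Submission keyword arguments are illustrative
# assumptions, not part of the real codebase:
#
#   class DirectoryCollector(Collector):
#       """Submits every file dropped into a watch directory."""
#       def __init__(self, watch_dir, *args, **kwargs):
#           super().__init__(*args, **kwargs)
#           self.watch_dir = watch_dir
#
#       def get_next_submission(self):
#           for name in os.listdir(self.watch_dir):
#               path = os.path.join(self.watch_dir, name)
#               return Submission(description=name,
#                                 analysis_mode='analysis',
#                                 files=[path])
#           return None  # nothing to do; execute() sleeps and retries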
|
serverThreading.py
|
import os
from threading import Lock, Thread
from execution import Executor
import logging
import socket
from pathlib import Path
executor = Executor()
lock = Lock()
thread_being_runned = 0
port = 1234
absolute_path = Path().absolute()
tempResultFile = os.path.join(absolute_path, "threads/tempResult.txt")
# accept clients in a loop and serve each one on its own thread
def readyServer():
    global port, thread_being_runned
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    host = socket.gethostname()
    print('[+]Server will start on host:', host)
    print('[+]Server is waiting on port:', port)
    # bind once and keep accepting; the original respawned readyServer (and
    # rebound the same port) for every client, which fails with EADDRINUSE
    s.bind(('', port))
    s.listen(5)
    while True:
        print()
        print('[!]Waiting for new client')
        print()
        conn, addr = s.accept()
        thread_being_runned += 1
        # serve each client on its own thread; the loop goes straight back to accept()
        newClientThread = Thread(target=serveNewClient, name=str(addr), args=[conn, addr])
        newClientThread.start()
def serveNewClient(conn, addr):
global port
print(addr, ' Has connected to the server')
print()
# INIT
global thread_being_runned
# making logger per thread
logger_name = 'logger' + str(thread_being_runned)
logger = logging.getLogger(logger_name)
formatter = logging.Formatter('[%(asctime)s] : %(message)s')
# output log file per thread
outputFile = "threads/outputClient" + str(thread_being_runned) + ".txt"
fileHandler = logging.FileHandler(outputFile, mode='w')
fileHandler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(fileHandler)
#inputfile
file_name = "threads/inputClient" + str(thread_being_runned) + ".txt"
# truncate any previous input log for this thread, then reopen in append
# mode per message (the original leaked the initial 'w' handle)
open(file_name, 'w').close()
while True:
    inputFile = open(file_name, 'a')
incoming_message = conn.recv(5000)
incoming_message = incoming_message.decode()
inputFile.write("\nExecuting command: " + incoming_message)
if incoming_message == 'close':
    inputFile.close()
    break
my_commands = incoming_message.split(" ")
executor.execCommand(my_commands, logger)
lock.acquire()
log_file = open(tempResultFile,"r")
message = log_file.read()
# message += "\n[Current Dir]: " + executor.currentDirName
log_file.close()  # the original was missing the call parentheses, so the file was never closed
lock.release()
message = message.encode()
inputFile.close()
# close connection if client crashes unexpectedly
try:
conn.send(message)
except socket.error:
conn.close()
break
readyServer()
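# Hedged example of a matching client for the server above. The protocol is
# inferred from serveNewClient: send a shell-style command string, read the
# reply, and send 'close' to end the session. Kept as a comment so importing
# this module still only starts the server:
#
#   import socket
#
#   def run_client(host='localhost', port=1234):
#       s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       s.connect((host, port))
#       s.send('ls -l'.encode())      # any command executor.execCommand accepts
#       print(s.recv(5000).decode())  # server replies with tempResult contents
#       s.send('close'.encode())      # tells the serving thread to stop
#       s.close()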
|
graph.py
|
import asyncio
import sys
from threading import Thread
from ..base import StreamEnd, StreamNone, StreamRepeat, TributaryException # noqa: F401
class StreamingGraph(object):
"""internal representation of the entire graph state"""
def __init__(self, node):
self._stop = False
self._starting_node = node # noqa F405
# coroutines to run on start and stop
self._onstarts = []
self._onstops = []
# Collect graph
self.getNodes()
def getNodes(self):
self._nodes = self._starting_node._deep_bfs()
# Run through nodes and extract onstarts and onstops
for ns in self._nodes:
for n in ns:
if n._onstarts:
self._onstarts.extend(list(n._onstarts))
if n._onstops:
self._onstops.extend(list(n._onstops))
# Check that all are async coroutines
for call in self._onstarts + self._onstops:
if not asyncio.iscoroutinefunction(call):
raise TributaryException(
"all onstarts and onstops must be async coroutines, got bad function: {}".format(
call
)
)
# return node levels
return self._nodes
def rebuild(self):
# TODO
return self._nodes
def stop(self):
self._stop = True
async def _run(self):
value, last, self._stop = None, None, False
# run onstarts
await asyncio.gather(*(asyncio.create_task(s()) for s in self._onstarts))
while True:
for level in self._nodes:
if self._stop:
break
await asyncio.gather(*(asyncio.create_task(n()) for n in level))
self.rebuild()
if self._stop:
break
value, last = self._starting_node.value(), value
if isinstance(value, StreamEnd):
break
# run `onstops`
await asyncio.gather(*(asyncio.create_task(s()) for s in self._onstops))
# return last val
return last
def run(self, blocking=True, newloop=False, start=True):
if sys.platform == "win32":
# Set to proactor event loop on Windows
# (default in python 3.8+)
loop = asyncio.ProactorEventLoop()
else:
if newloop:
loop = asyncio.new_event_loop()
else:
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
task = loop.create_task(self._run())
if blocking:
# block until done
try:
return loop.run_until_complete(task)
except KeyboardInterrupt:
return
if start:
t = Thread(target=loop.run_until_complete, args=(task,))
t.daemon = True
t.start()
return loop
return loop, task
def graph(self):
return self._starting_node.graph()
def graphviz(self):
return self._starting_node.graphviz()
def dagre(self):
return self._starting_node.dagre()
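# Hedged usage sketch. Node construction is illustrative -- the Timer and Print
# names below are assumptions about the surrounding library, not verified:
#
#   def example():
#       from tributary.streaming import Timer, Print
#       node = Print(Timer(lambda: 1, count=3))  # emit 1 three times, printing each
#       graph = StreamingGraph(node)
#       return graph.run(blocking=True)          # returns the last value seen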
|
runner.py
|
#!/usr/bin/env python
'''
Simple test runner
See settings.py file for options & params. Edit as needed.
These tests can be run in parallel using nose, for example
nosetests --processes=4 -v -s tests/runner.py
will use 4 processes. To install nose do something like
|pip install nose| or |sudo apt-get install python-nose|.
'''
from subprocess import Popen, PIPE, STDOUT
import os, unittest, tempfile, shutil, time, inspect, sys, math, glob, re, difflib, webbrowser, hashlib, threading, platform, BaseHTTPServer, multiprocessing
if len(sys.argv) == 1:
print '''
==============================================================================
Running the main part of the test suite. Don't forget to run the other parts!
sanity - tests for first run, etc., modifies ~/.emscripten
benchmark - run before and after each set of changes before pushing to
master, verify no regressions
browser - runs pages in a web browser
To run one of those parts, do something like
python tests/runner.py sanity
To run a specific set of tests, you can do things like
python tests/runner.py o1
(that runs the o1 (-O1) tests). You can run individual tests with
python tests/runner.py test_hello_world
Combinations work too, for example
python tests/runner.py browser.test_sdl_image
==============================================================================
'''
time.sleep(2)
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root('')]
import tools.shared
from tools.shared import *
# Sanity check for config
try:
assert COMPILER_OPTS is not None
except:
raise Exception('Cannot find "COMPILER_OPTS" definition. Is %s set up properly? You may need to copy the template from settings.py into it.' % EM_CONFIG)
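# For reference, a hedged sketch of what the config file checked above usually
# contains (paths are illustrative; see settings.py for the real template):
#
#   EMSCRIPTEN_ROOT = '/path/to/emscripten'
#   LLVM_ROOT = '/path/to/llvm/bin'
#   COMPILER_OPTS = []
#   SPIDERMONKEY_ENGINE = ['/path/to/js']
#   NODE_JS = 'node'
#   TEMP_DIR = '/tmp'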
# Core test runner class, shared between normal tests and benchmarks
class RunnerCore(unittest.TestCase):
save_dir = os.environ.get('EM_SAVE_DIR')
save_JS = 0
stderr_redirect = STDOUT # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
def setUp(self):
global Settings
Settings.reset()
Settings = tools.shared.Settings
self.banned_js_engines = []
if not self.save_dir:
dirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
else:
dirname = EMSCRIPTEN_TEMP_DIR
if not os.path.exists(dirname):
os.makedirs(dirname)
self.working_dir = dirname
os.chdir(dirname)
def tearDown(self):
if self.save_JS:
for name in os.listdir(self.get_dir()):
if name.endswith(('.o.js', '.cc.js')):
suff = '.'.join(name.split('.')[-2:])
shutil.copy(os.path.join(self.get_dir(), name),
os.path.join(TEMP_DIR, self.id().replace('__main__.', '').replace('.test_', '.')+'.'+suff))
if not self.save_dir:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.join(self.get_dir(), '..'))
shutil.rmtree(self.get_dir())
def skip(self, why):
print >> sys.stderr, '<skipping: %s> ' % why,
def get_dir(self):
return self.working_dir
def get_shared_library_name(self, linux_name):
if platform.system() == 'Linux':
return linux_name
elif platform.system() == 'Darwin':
return linux_name.replace('.so', '') + '.dylib'
else:
print >> sys.stderr, 'get_shared_library_name needs to be implemented on %s' % platform.system()
return linux_name
def get_stdout_path(self):
return os.path.join(self.get_dir(), 'stdout')
def prep_ll_run(self, filename, ll_file, force_recompile=False, build_ll_hook=None):
if ll_file.endswith(('.bc', '.o')):
if ll_file != filename + '.o':
shutil.copy(ll_file, filename + '.o')
Building.llvm_dis(filename)
else:
shutil.copy(ll_file, filename + '.o.ll')
#force_recompile = force_recompile or os.stat(filename + '.o.ll').st_size > 50000 # if the file is big, recompile just to get ll_opts # Recompiling just for dfe in ll_opts is too costly
if Building.LLVM_OPTS or force_recompile or build_ll_hook:
Building.ll_opts(filename)
if build_ll_hook:
need_post = build_ll_hook(filename)
Building.llvm_as(filename)
shutil.move(filename + '.o.ll', filename + '.o.ll.pre') # for comparisons later
if Building.LLVM_OPTS:
Building.llvm_opts(filename)
Building.llvm_dis(filename)
if build_ll_hook and need_post:
build_ll_hook(filename)
Building.llvm_as(filename)
shutil.move(filename + '.o.ll', filename + '.o.ll.post') # for comparisons later
Building.llvm_dis(filename)
# Generate JS from ll, and optionally modify the generated JS with a post_build function. Note
# that post_build is called on unoptimized JS, so we send it to emcc (otherwise, if run after
# emcc, it would not apply on the optimized/minified JS)
def ll_to_js(self, filename, extra_emscripten_args, post_build):
if type(post_build) in (list, tuple):
post1, post2 = post_build
else:
post1 = post_build
post2 = None
def run_post(post):
if not post: return
exec post in locals()
shutil.copyfile(filename + '.o.js', filename + '.o.js.prepost.js')
process(filename + '.o.js')
if self.emcc_args is None:
Building.emscripten(filename, append_ext=True, extra_args=extra_emscripten_args)
run_post(post1)
run_post(post2)
else:
transform_args = []
if post1:
transform_filename = os.path.join(self.get_dir(), 'transform.py')
transform = open(transform_filename, 'w')
transform.write('''
import sys
sys.path += [%r]
''' % path_from_root(''))
transform.write(post1)
transform.write('''
process(sys.argv[1])
''')
transform.close()
transform_args = ['--js-transform', "python %s" % transform_filename]
Building.emcc(filename + '.o.ll', Settings.serialize() + self.emcc_args + transform_args, filename + '.o.js')
run_post(post2)
# Build JavaScript code from source code
def build(self, src, dirname, filename, output_processor=None, main_file=None, additional_files=[], libraries=[], includes=[], build_ll_hook=None, extra_emscripten_args=[], post_build=None):
Building.pick_llvm_opts(3) # pick llvm opts here, so we include changes to Settings in the test case code
# Copy over necessary files for compiling the source
if main_file is None:
f = open(filename, 'w')
f.write(src)
f.close()
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = map(lambda f: os.path.join(dirname, f), additional_files)
os.chdir(self.get_dir())
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except:
pass
args = [Building.COMPILER, '-emit-llvm'] + COMPILER_OPTS + Building.COMPILER_TEST_OPTS + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
map(lambda include: '-I' + include, includes) + \
['-c', f, '-o', f + '.o']
output = Popen(args, stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
assert os.path.exists(f + '.o'), 'Source compilation error: ' + output
# Link all files
if len(additional_files) + len(libraries) > 0:
shutil.move(filename + '.o', filename + '.o.alone')
Building.link([filename + '.o.alone'] + map(lambda f: f + '.o', additional_files) + libraries,
filename + '.o')
if not os.path.exists(filename + '.o'):
print "Failed to link LLVM binaries:\n\n", output
raise Exception("Linkage error");
# Finalize
self.prep_ll_run(filename, filename + '.o', build_ll_hook=build_ll_hook)
# BC => JS
self.ll_to_js(filename, extra_emscripten_args, post_build)
if output_processor is not None:
output_processor(open(filename + '.o.js').read())
def run_generated_code(self, engine, filename, args=[], check_timeout=True):
stdout = os.path.join(self.get_dir(), 'stdout') # use files, as PIPE can get too full and hang us
stderr = os.path.join(self.get_dir(), 'stderr')
try:
cwd = os.getcwd()
except:
cwd = None
os.chdir(self.get_dir())
run_js(filename, engine, args, check_timeout, stdout=open(stdout, 'w'), stderr=open(stderr, 'w'))
if cwd is not None:
os.chdir(cwd)
ret = open(stdout, 'r').read() + open(stderr, 'r').read()
assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
return ret
def build_native(self, filename):
Popen([CLANG, '-O2', filename, '-o', filename+'.native'], stdout=PIPE).communicate()[0]
def run_native(self, filename, args):
Popen([filename+'.native'] + args, stdout=PIPE).communicate()[0]
def assertIdentical(self, x, y):
if x != y:
raise Exception("Expected to have '%s' == '%s', diff:\n\n%s" % (
limit_size(x), limit_size(y),
limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(x.split('\n'), y.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]: values = [values]
for value in values:
if type(string) is not str: string = string()
if value in string: return # success
raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string),
limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')])),
additional_info
))
def assertNotContained(self, value, string):
if type(value) is not str: value = value() # lazy loading
if type(string) is not str: string = string()
if value in string:
raise Exception("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
if not os.path.exists(ret):
os.makedirs(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'], configure_args=[], make=['make'], make_args=['-j', '2'], cache=True):
build_dir = self.get_build_dir()
output_dir = self.get_dir()
cache_name = name + '|' + Building.COMPILER
if self.library_cache is not None:
if cache and self.library_cache.get(cache_name):
print >> sys.stderr, '<load build from cache> ',
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, basename)
f = open(bc_file, 'wb')
f.write(contents)
f.close()
generated_libs.append(bc_file)
return generated_libs
print >> sys.stderr, '<building and saving into cache> ',
return Building.build_library(name, build_dir, output_dir, generated_libs, configure, configure_args, make, make_args, self.library_cache, cache_name,
copy_project=True)
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(name)
emcc_debug = os.environ.get('EMCC_DEBUG')
if emcc_debug:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
###################################################################################################
sys.argv = map(lambda arg: arg if not arg.startswith('test_') else 'default.' + arg, sys.argv)
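# e.g. ['runner.py', 'test_hello_world'] -> ['runner.py', 'default.test_hello_world'],
# so a bare test name is looked up on the default test class (presumably an
# alias of T below) rather than at module level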
if 'benchmark' not in str(sys.argv) and 'sanity' not in str(sys.argv) and 'browser' not in str(sys.argv):
# Tests
print "Running Emscripten tests..."
class T(RunnerCore): # Short name, to make it more fun to use manually on the commandline
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None, output_processor=None, no_build=False, main_file=None, additional_files=[], js_engines=None, post_build=None, basename='src.cpp', libraries=[], includes=[], force_c=False, build_ll_hook=None, extra_emscripten_args=[]):
if force_c or (main_file is not None and main_file[-2:] == '.c'):  # fixed parenthesization: compare the extension, not the and-expression
basename = 'src.c'
Building.COMPILER = to_cc(Building.COMPILER)
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
if not no_build:
self.build(src, dirname, filename, main_file=main_file, additional_files=additional_files, libraries=libraries, includes=includes,
build_ll_hook=build_ll_hook, extra_emscripten_args=extra_emscripten_args, post_build=post_build)
# Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
if js_engines is None:
js_engines = JS_ENGINES
if Settings.USE_TYPED_ARRAYS:
js_engines = filter(lambda engine: engine != V8_ENGINE, js_engines) # V8 issue 1822
js_engines = filter(lambda engine: engine not in self.banned_js_engines, js_engines)
if len(js_engines) == 0: return self.skip('No JS engine present to run this test with. Check %s and settings.py and the paths therein.' % EM_CONFIG)
for engine in js_engines:
engine = filter(lambda arg: arg != '-n', engine) # SpiderMonkey issue 716255
js_output = self.run_generated_code(engine, filename + '.o.js', args)
if output_nicerizer is not None:
js_output = output_nicerizer(js_output)
self.assertContained(expected_output, js_output)
self.assertNotContained('ERROR', js_output)
#shutil.rmtree(dirname) # TODO: leave no trace in memory. But for now nice for debugging
# No building - just process an existing .ll file (or .bc, which we turn into .ll)
def do_ll_run(self, ll_file, expected_output=None, args=[], js_engines=None, output_nicerizer=None, post_build=None, force_recompile=False, build_ll_hook=None, extra_emscripten_args=[]):
filename = os.path.join(self.get_dir(), 'src.cpp')
self.prep_ll_run(filename, ll_file, force_recompile, build_ll_hook)
self.ll_to_js(filename, extra_emscripten_args, post_build)
self.do_run(None,
expected_output,
args,
no_build=True,
js_engines=js_engines,
output_nicerizer=output_nicerizer,
post_build=None) # post_build was already done in ll_to_js, this do_run call is just to test the output
def test_hello_world(self):
src = '''
#include <stdio.h>
int main()
{
printf("hello, world!\\n");
return 0;
}
'''
self.do_run(src, 'hello, world!')
def test_intvars(self):
src = '''
#include <stdio.h>
int global = 20;
int *far;
int main()
{
int x = 5;
int y = x+17;
int z = (y-1)/2; // Should stay an integer after division!
y += 1;
int w = x*3+4;
int k = w < 15 ? 99 : 101;
far = &k;
*far += global;
int i = k > 100; // Should be an int, not a bool!
int j = i << 6;
j >>= 1;
j = j ^ 5;
int h = 1;
h |= 0;
int p = h;
p &= 0;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", x, y, z, w, k, i, j, h, p);
long hash = -1;
size_t perturb;
int ii = 0;
for (perturb = hash; ; perturb >>= 5) {
printf("%d:%d", ii, perturb);
ii++;
if (ii == 9) break;
printf(",");
}
printf("*\\n");
printf("*%.1d,%.2d*\\n", 56, 9);
// Fixed-point math on 64-bit ints. Tricky to support since we have no 64-bit shifts in JS
{
struct Fixed {
static int Mult(int a, int b) {
return ((long long)a * (long long)b) >> 16;
}
};
printf("fixed:%d\\n", Fixed::Mult(150000, 140000));
}
printf("*%ld*%p\\n", (long)21, &hash); // The %p should not enter an infinite loop!
return 0;
}
'''
self.do_run(src, '*5,23,10,19,121,1,37,1,0*\n0:-1,1:134217727,2:4194303,3:131071,4:4095,5:127,6:3,7:0,8:0*\n*56,09*\nfixed:320434\n*21*')
def test_sintvars(self):
Settings.CORRECT_SIGNS = 1 # Relevant to this test
src = '''
#include <stdio.h>
struct S {
char *match_start;
char *strstart;
};
int main()
{
struct S _s;
struct S *s = &_s;
unsigned short int sh;
s->match_start = (char*)32522;
s->strstart = (char*)(32780);
printf("*%d,%d,%d*\\n", (int)s->strstart, (int)s->match_start, (int)(s->strstart - s->match_start));
sh = s->strstart - s->match_start;
printf("*%d,%d*\\n", sh, sh>>7);
s->match_start = (char*)32999;
s->strstart = (char*)(32780);
printf("*%d,%d,%d*\\n", (int)s->strstart, (int)s->match_start, (int)(s->strstart - s->match_start));
sh = s->strstart - s->match_start;
printf("*%d,%d*\\n", sh, sh>>7);
}
'''
output = '*32780,32522,258*\n*258,2*\n*32780,32999,-219*\n*65317,510*'
Settings.CORRECT_OVERFLOWS = 0 # We should not need overflow correction to get this right
self.do_run(src, output, force_c=True)
def test_i64(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
src = '''
#include <stdio.h>
int main()
{
long long a = 0x2b00505c10;
long long b = a >> 29;
long long c = a >> 32;
long long d = a >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\\n", a, b, c, d);
unsigned long long ua = 0x2b00505c10;
unsigned long long ub = ua >> 29;
unsigned long long uc = ua >> 32;
unsigned long long ud = ua >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\\n", ua, ub, uc, ud);
long long x = 0x0000def123450789ULL; // any bigger than this, and we
long long y = 0x00020ef123456089ULL; // start to run into the double precision limit!
printf("*%Ld,%Ld,%Ld,%Ld,%Ld*\\n", x, y, x | y, x & y, x ^ y, x >> 2, y << 2);
printf("*");
long long z = 13;
int n = 0;
while (z > 1) {
printf("%.2f,", (float)z); // these must be integers!
z = z >> 1;
n++;
}
printf("*%d*\\n", n);
return 0;
}
'''
self.do_run(src, '*184688860176,344,43,10*\n*184688860176,344,43,10*\n*245127260211081,579378795077769,808077213656969,16428841631881,791648372025088*\n*13.00,6.00,3.00,*3*')
src = r'''
#include <time.h>
#include <stdio.h>
#include <stdint.h>
int64_t returner1() { return 0x0000def123450789ULL; }
int64_t returner2(int test) {
while (test > 10) test /= 2; // confuse the compiler so it doesn't eliminate this function
return test > 5 ? 0x0000def123450123ULL : 0ULL;
}
void modifier1(int64_t t) {
t |= 12;
printf("m1: %Ld\n", t);
}
void modifier2(int64_t &t) {
t |= 12;
}
int truthy() {
int x = time(0);
while (x > 10) {
x |= 7;
x /= 2;
}
return x < 3;
}
struct IUB {
int c;
long long d;
};
IUB iub[] = {
{ 55, 17179869201 },
{ 122, 25769803837 },
};
int main(int argc, char **argv)
{
int64_t x1 = 0x1234def123450789ULL;
int64_t x2 = 0x1234def123450788ULL;
int64_t x3 = 0x1234def123450789ULL;
printf("*%Ld\n%d,%d,%d,%d,%d\n%d,%d,%d,%d,%d*\n", x1, x1==x2, x1<x2, x1<=x2, x1>x2, x1>=x2, // note: some rounding in the printing!
x1==x3, x1<x3, x1<=x3, x1>x3, x1>=x3);
printf("*%Ld*\n", returner1());
printf("*%Ld*\n", returner2(30));
uint64_t maxx = -1ULL;
printf("*%Lu*\n*%Lu*\n", maxx, maxx >> 5);
// Make sure params are not modified if they shouldn't be
int64_t t = 123;
modifier1(t);
printf("*%Ld*\n", t);
modifier2(t);
printf("*%Ld*\n", t);
// global structs with i64s
printf("*%d,%Ld*\n*%d,%Ld*\n", iub[0].c, iub[0].d, iub[1].c, iub[1].d);
// Bitshifts
{
int64_t a = -1;
int64_t b = a >> 29;
int64_t c = a >> 32;
int64_t d = a >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d);
uint64_t ua = -1;
int64_t ub = ua >> 29;
int64_t uc = ua >> 32;
int64_t ud = ua >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud);
}
// Nonconstant bitshifts
{
int64_t a = -1;
int64_t b = a >> (29 - argc + 1);
int64_t c = a >> (32 - argc + 1);
int64_t d = a >> (34 - argc + 1);
printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d);
uint64_t ua = -1;
int64_t ub = ua >> (29 - argc + 1);
int64_t uc = ua >> (32 - argc + 1);
int64_t ud = ua >> (34 - argc + 1);
printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud);
}
// Math mixtures with doubles
{
uint64_t a = 5;
double b = 6.8;
uint64_t c = a * b;
printf("*prod:%llu*\n*%d,%d,%d*\n", c, (int)&a, (int)&b, (int)&c); // printing addresses prevents optimizations
}
// Basic (rounded, for now) math. Just check compilation.
int64_t a = 0x1234def123450789ULL;
a--; if (truthy()) a--; // confuse optimizer
int64_t b = 0x1234000000450789ULL;
b++; if (truthy()) b--; // confuse optimizer
printf("*%Ld,%Ld,%Ld,%Ld*\n", (a+b)/5000, (a-b)/5000, (a*3)/5000, (a/5)/5000);
return 0;
}
'''
self.do_run(src, '*1311918518731868041\n' +
'0,0,0,1,1\n' +
'1,0,1,0,1*\n' +
'*245127260211081*\n' +
'*245127260209443*\n' +
'*18446744073709551615*\n' +
'*576460752303423487*\n' +
'm1: 127\n' +
'*123*\n' +
'*127*\n' +
'*55,17179869201*\n' +
'*122,25769803837*\n' +
'*-1,-1,-1,-1*\n' +
'*-1,34359738367,4294967295,1073741823*\n' +
'*-1,-1,-1,-1*\n' +
'*-1,34359738367,4294967295,1073741823*\n' +
'*prod:34*')
src = r'''
#include <stdio.h>
#include <limits>
int main()
{
long long i,j,k;
i = 0;
j = -1,
k = 1;
printf( "*\n" );
printf( "%s\n", i > j ? "Ok": "Fail" );
printf( "%s\n", k > i ? "Ok": "Fail" );
printf( "%s\n", k > j ? "Ok": "Fail" );
printf( "%s\n", i < j ? "Fail": "Ok" );
printf( "%s\n", k < i ? "Fail": "Ok" );
printf( "%s\n", k < j ? "Fail": "Ok" );
printf( "%s\n", (i-j) >= k ? "Ok": "Fail" );
printf( "%s\n", (i-j) <= k ? "Ok": "Fail" );
printf( "%s\n", i > std::numeric_limits<long long>::min() ? "Ok": "Fail" );
printf( "%s\n", i < std::numeric_limits<long long>::max() ? "Ok": "Fail" );
printf( "*\n" );
}
'''
self.do_run(src, '*\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\n*')
# stuff that also needs sign corrections
Settings.CORRECT_SIGNS = 1
src = r'''
#include <stdio.h>
#include <stdint.h>
int main()
{
// i32 vs i64
int32_t small = -1;
int64_t large = -1;
printf("*%d*\n", small == large);
small++;
printf("*%d*\n", small == large);
uint32_t usmall = -1;
uint64_t ularge = -1;
printf("*%d*\n", usmall == ularge);
return 0;
}
'''
self.do_run(src, '*1*\n*0*\n*0*\n')
def test_i64_b(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
#include <sys/time.h>
typedef long long int64;
#define PRMJ_USEC_PER_SEC 1000000L
int main(int argc, char * argv[]) {
int64 sec = 1329409675 + argc;
int64 usec = 2329509675;
int64 mul = int64(sec) * PRMJ_USEC_PER_SEC;
int64 add = mul + int64(usec);
int add_low = add;
int add_high = add >> 32;
printf("*%lld,%lld,%u,%u*\n", mul, add, add_low, add_high);
return 0;
}
'''
self.do_run(src, '*1329409676000000,1329412005509675,3663280683,309527*\n')
def test_i64_cmp(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
typedef long long int64;
bool compare(int64 val) {
return val == -12;
}
bool compare2(int64 val) {
return val < -12;
}
int main(int argc, char * argv[]) {
printf("*%d,%d,%d,%d,%d,%d*\n", argc, compare(argc-1-12), compare(1000+argc), compare2(argc-1-10), compare2(argc-1-14), compare2(argc+1000));
return 0;
}
'''
self.do_run(src, '*1,1,0,0,1,0*\n')
def test_i64_cmp2(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
typedef int32_t INT32;
typedef int64_t INT64;
typedef uint8_t UINT8;
void interface_clock_changed()
{
UINT8 m_divshift;
INT32 m_divisor;
//INT64 attos = m_attoseconds_per_cycle;
INT64 attos = 279365114840;
m_divshift = 0;
while (attos >= (1UL << 31))
{
m_divshift++;
printf("m_divshift is %i, on %Ld >?= %lu\n", m_divshift, attos, 1UL << 31);
attos >>= 1;
}
m_divisor = attos;
printf("m_divisor is %i\n",m_divisor);
}
int main() {
interface_clock_changed();
return 0;
}
'''
self.do_run(src, '''m_divshift is 1, on 279365114840 >?= 2147483648
m_divshift is 2, on 139682557420 >?= 2147483648
m_divshift is 3, on 69841278710 >?= 2147483648
m_divshift is 4, on 34920639355 >?= 2147483648
m_divshift is 5, on 17460319677 >?= 2147483648
m_divshift is 6, on 8730159838 >?= 2147483648
m_divshift is 7, on 4365079919 >?= 2147483648
m_divshift is 8, on 2182539959 >?= 2147483648
m_divisor is 1091269979
''')
def test_i64_double(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
typedef long long int64;
#define JSDOUBLE_HI32_SIGNBIT 0x80000000
bool JSDOUBLE_IS_NEGZERO(double d)
{
union {
struct {
unsigned int lo, hi;
} s;
double d;
} x;
if (d != 0)
return false;
x.d = d;
return (x.s.hi & JSDOUBLE_HI32_SIGNBIT) != 0;
}
bool JSINT64_IS_NEGZERO(int64 l)
{
union {
int64 i;
double d;
} x;
if (l != 0)
return false;
x.i = l;
return x.d == -0;
}
int main(int argc, char * argv[]) {
printf("*%d,%d,%d,%d*\n", JSDOUBLE_IS_NEGZERO(0), JSDOUBLE_IS_NEGZERO(-0), JSDOUBLE_IS_NEGZERO(-1), JSDOUBLE_IS_NEGZERO(+1));
printf("*%d,%d,%d,%d*\n", JSINT64_IS_NEGZERO(0), JSINT64_IS_NEGZERO(-0), JSINT64_IS_NEGZERO(-1), JSINT64_IS_NEGZERO(+1));
return 0;
}
'''
self.do_run(src, '*0,0,0,0*\n*1,1,0,0*\n') # same as gcc
def test_i64_umul(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
typedef uint32_t UINT32;
typedef uint64_t UINT64;
int main() {
volatile UINT32 testu32a = 2375724032U;
UINT32 bigu32 = 0xffffffffU;
volatile UINT64 testu64a = 14746250828952703000U;
while ((UINT64)testu32a * (UINT64)bigu32 < testu64a) {
printf("testu64a is %llu\n", testu64a);
testu64a /= 2;
}
return 0;
}
'''
self.do_run(src, 'testu64a is 14746250828952703000\n')
def test_i64_precise(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
int main() {
uint64_t x = 0, y = 0;
for (int i = 0; i < 64; i++) {
x += 1ULL << i;
y += x;
x /= 3;
y *= 5;
printf("unsigned %d: %llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", i, x, y, x+y, x-y, x*y, y ? x/y : 0, x ? y/x : 0, y ? x%y : 0, x ? y%x : 0);
}
int64_t x2 = 0, y2 = 0;
for (int i = 0; i < 64; i++) {
x2 += 1LL << i;
y2 += x2;
x2 /= 3 * (i % 7 ? -1 : 1);
y2 *= 5 * (i % 2 ? -1 : 1);
printf("signed %d: %lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld\n", i, x2, y2, x2+y2, x2-y2, x2*y2, y2 ? x2/y2 : 0, x2 ? y2/x2 : 0, y2 ? x2%y2 : 0, x2 ? y2%x2 : 0);
}
return 0;
}
'''
self.do_run(src, open(path_from_root('tests', 'i64_precise.txt')).read())
# Verify that without precision, we do not include the precision code
Settings.PRECISE_I64_MATH = 0
self.do_run(src, 'unsigned')
code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
assert 'goog.math.Long' not in code and 'jsbn' not in code, 'i64 precise math should not have been included if not asked for'
# Verify that even if we ask for precision, if it is not needed it is not included
Settings.PRECISE_I64_MATH = 1
src = '''
#include <inttypes.h>
#include <stdio.h>
int main(int argc, char **argv) {
uint64_t x = 2125299906845564, y = 1225891506842664;
if (argc == 12) {
x = x >> 1;
y = y >> 1;
}
x = x & 12ULL;
y = y | 12ULL;
x = x ^ y;
x <<= 2;
y >>= 3;
printf("*%llu, %llu*\\n", x, y);
}
'''
self.do_run(src, '*4903566027370624, 153236438355333*')
code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
assert 'goog.math.Long' not in code and 'jsbn' not in code, 'i64 precise math should not have been included if not actually used'
def test_cube2hash(self):
# A good test of i64 math
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2 C-style memory aliasing')
self.do_run('', 'Usage: hashstring <seed>',
libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None),
includes=[path_from_root('tests', 'cube2hash')])
for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'),
('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'),
('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]:
self.do_run('', 'hash value: ' + output, [text], no_build=True)
def test_unaligned(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('No meaning to unaligned addresses in q1')
src = r'''
#include<stdio.h>
struct S {
double x;
int y;
};
int main() {
// the 64-bit value here will not always be 8-byte aligned
S s[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}};
printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8,
((unsigned int)&s[1]) - ((unsigned int)&s[0]));
s[0].x++;
s[0].y++;
s[1].x++;
s[1].y++;
printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y);
return 0;
}
'''
# TODO: A version of this with int64s as well
self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n')
return # TODO: continue to the next part here
# Test for undefined behavior in C. This is not legitimate code, but does exist
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('No meaning to unaligned addresses without t2')
src = r'''
#include <stdio.h>
int main()
{
int x[10];
char *p = (char*)&x[0];
p++;
short *q = (short*)p;
*q = 300;
printf("*%d:%d*\n", *q, ((int)q)%2);
int *r = (int*)p;
*r = 515559;
printf("*%d*\n", *r);
long long *t = (long long*)p;
*t = 42949672960;
printf("*%Ld*\n", *t);
return 0;
}
'''
Settings.EMULATE_UNALIGNED_ACCESSES = 0
try:
self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n')
except Exception, e:
assert 'must be aligned' in str(e), e # expected to fail without emulation
# XXX TODO Settings.EMULATE_UNALIGNED_ACCESSES = 1
#self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n') # but succeeds with it
def test_unsigned(self):
Settings.CORRECT_SIGNS = 1 # We test for exactly this sort of thing here
Settings.CHECK_SIGNS = 0
src = '''
#include <stdio.h>
const signed char cvals[2] = { -1, -2 }; // compiler can store this as a string, so -1 becomes \FF, and needs re-signing
int main()
{
{
unsigned char x = 200;
printf("*%d*\\n", x);
unsigned char y = -22;
printf("*%d*\\n", y);
}
int varey = 100;
unsigned int MAXEY = -1, MAXEY2 = -77;
printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned!
int y = cvals[0];
printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0);
y = cvals[1];
printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0);
// zext issue - see mathop in jsifier
unsigned char x8 = -10;
unsigned long hold = 0;
hold += x8;
int y32 = hold+50;
printf("*%u,%u*\\n", hold, y32);
// Comparisons
x8 = 0;
for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2
printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode
return 0;
}
'''
self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')
# Now let's see some code that should just work in USE_TYPED_ARRAYS == 2, but requires
# corrections otherwise
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 0
Settings.CHECK_SIGNS = 1
else:
Settings.CORRECT_SIGNS = 1
Settings.CHECK_SIGNS = 0
src = '''
#include <stdio.h>
int main()
{
{
unsigned char x;
unsigned char *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
{
unsigned short x;
unsigned short *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
/*{ // This case is not checked. The hint for unsignedness is just the %u in printf, and we do not analyze that
unsigned int x;
unsigned int *y = &x;
*y = -1;
printf("*%u*\\n", x);
}*/
{
char x;
char *y = &x;
*y = 255;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 65535;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 0xffffffff;
printf("*%d*\\n", x);
}
return 0;
}
'''
self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*')
def test_bitfields(self):
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # bitfields do loads on invalid areas, by design
src = '''
#include <stdio.h>
struct bitty {
unsigned x : 1;
unsigned y : 1;
unsigned z : 1;
};
int main()
{
bitty b;
printf("*");
for (int i = 0; i <= 1; i++)
for (int j = 0; j <= 1; j++)
for (int k = 0; k <= 1; k++) {
b.x = i;
b.y = j;
b.z = k;
printf("%d,%d,%d,", b.x, b.y, b.z);
}
printf("*\\n");
return 0;
}
'''
self.do_run(src, '*0,0,0,0,0,1,0,1,0,0,1,1,1,0,0,1,0,1,1,1,0,1,1,1,*')
def test_floatvars(self):
src = '''
#include <stdio.h>
int main()
{
float x = 1.234, y = 3.5, q = 0.00000001;
y *= 3;
int z = x < y;
printf("*%d,%d,%.1f,%d,%.4f,%.2f*\\n", z, int(y), y, (int)x, x, q);
/*
// Rounding behavior
float fs[6] = { -2.75, -2.50, -2.25, 2.25, 2.50, 2.75 };
double ds[6] = { -2.75, -2.50, -2.25, 2.25, 2.50, 2.75 };
for (int i = 0; i < 6; i++)
printf("*int(%.2f)=%d,%d*\\n", fs[i], int(fs[i]), int(ds[i]));
*/
return 0;
}
'''
self.do_run(src, '*1,10,10.5,1,1.2340,0.00*')
def test_math(self):
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
int main()
{
printf("*%.2f,%.2f,%d", M_PI, -M_PI, (1/0.0) > 1e300); // could end up as infinity, or just a very very big number
printf(",%d", finite(NAN) != 0);
printf(",%d", finite(INFINITY) != 0);
printf(",%d", finite(-INFINITY) != 0);
printf(",%d", finite(12.3) != 0);
printf(",%d", isinf(NAN) != 0);
printf(",%d", isinf(INFINITY) != 0);
printf(",%d", isinf(-INFINITY) != 0);
printf(",%d", isinf(12.3) != 0);
div_t div_result = div(23, 10);
printf(",%d", div_result.quot);
printf(",%d", div_result.rem);
double sine = -1.0, cosine = -1.0;
sincos(0.0, &sine, &cosine);
printf(",%1.1lf", sine);
printf(",%1.1lf", cosine);
float fsine = -1.0f, fcosine = -1.0f;
sincosf(0.0, &fsine, &fcosine);
printf(",%1.1f", fsine);
printf(",%1.1f", fcosine);
printf("*\\n");
return 0;
}
'''
self.do_run(src, '*3.14,-3.14,1,0,0,0,1,0,1,1,0,2,3,0.0,1.0,0.0,1.0*')
def test_math_hyperbolic(self):
src = open(path_from_root('tests', 'hyperbolic', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'hyperbolic', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_getgep(self):
# Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP
src = '''
#include <stdio.h>
struct {
int y[10];
int z[10];
} commonblock;
int main()
{
for (int i = 0; i < 10; ++i) {
commonblock.y[i] = 1;
commonblock.z[i] = 2;
}
printf("*%d %d*\\n", commonblock.y[0], commonblock.z[0]);
return 0;
}
'''
self.do_run(src, '*1 2*')
def test_multiply_defined_symbols(self):
a1 = "int f() { return 1; }"
a1_name = os.path.join(self.get_dir(), 'a1.c')
open(a1_name, 'w').write(a1)
a2 = "void x() {}"
a2_name = os.path.join(self.get_dir(), 'a2.c')
open(a2_name, 'w').write(a2)
b1 = "int f() { return 2; }"
b1_name = os.path.join(self.get_dir(), 'b1.c')
open(b1_name, 'w').write(b1)
b2 = "void y() {}"
b2_name = os.path.join(self.get_dir(), 'b2.c')
open(b2_name, 'w').write(b2)
main = r'''
#include <stdio.h>
int f();
int main() {
printf("result: %d\n", f());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(a1_name)
Building.emcc(a2_name)
Building.emcc(b1_name)
Building.emcc(b2_name)
Building.emcc(main_name)
liba_name = os.path.join(self.get_dir(), 'liba.a')
Building.emar('cr', liba_name, [a1_name + '.o', a2_name + '.o'])
libb_name = os.path.join(self.get_dir(), 'libb.a')
Building.emar('cr', libb_name, [b1_name + '.o', b2_name + '.o'])
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([main_name + '.o', liba_name, libb_name], all_name)
self.do_ll_run(all_name, 'result: 1')
def test_if(self):
src = '''
#include <stdio.h>
int main()
{
int x = 5;
if (x > 3) {
printf("*yes*\\n");
}
return 0;
}
'''
self.do_run(src, '*yes*')
def test_if_else(self):
src = '''
#include <stdio.h>
int main()
{
int x = 5;
if (x > 10) {
printf("*yes*\\n");
} else {
printf("*no*\\n");
}
return 0;
}
'''
self.do_run(src, '*no*')
def test_loop(self):
src = '''
#include <stdio.h>
int main()
{
int x = 5;
for (int i = 0; i < 6; i++) {
x += x*i;
if (x > 1000) {
if (x % 7 == 0) printf("cheez\\n");
x /= 2;
break;
}
}
printf("*%d*\\n", x);
return 0;
}
'''
self.do_run(src, '*1800*')
generated = open('src.cpp.o.js', 'r').read()
assert '__label__ ==' not in generated, 'We should hoist into the loop'
def test_stack(self):
src = '''
#include <stdio.h>
int test(int i) {
int x = 10;
if (i > 0) {
return test(i-1);
}
return int(&x); // both for the number, and forces x to not be nativized
}
int main()
{
// We should get the same value for the first and last - stack has unwound
int x1 = test(0);
int x2 = test(100);
int x3 = test(0);
printf("*%d,%d*\\n", x3-x1, x2 != x1);
return 0;
}
'''
self.do_run(src, '*0,1*')
def test_strings(self):
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv)
{
int x = 5, y = 9, magic = 7; // fool compiler with magic
memmove(&x, &y, magic-7); // 0 should not crash us
int xx, yy, zz;
char s[32];
int cc = sscanf("abc_10.b1_xyz_543_defg", "abc_%d.%2x_xyz_%3d_%3s", &xx, &yy, &zz, s);
printf("%d:%d,%d,%d,%s\\n", cc, xx, yy, zz, s);
printf("%d\\n", argc);
puts(argv[1]);
puts(argv[2]);
printf("%d\\n", atoi(argv[3])+2);
const char *foolingthecompiler = "\\rabcd";
printf("%d\\n", strlen(foolingthecompiler)); // Tests parsing /0D in llvm - should not be a 0 (end string) then a D!
printf("%s\\n", NULL); // Should print '(null)', not the string at address 0, which is a real address for us!
printf("/* a comment */\\n"); // Should not break the generated code!
printf("// another\\n"); // Should not break the generated code!
char* strdup_val = strdup("test");
printf("%s\\n", strdup_val);
free(strdup_val);
return 0;
}
'''
self.do_run(src, '4:10,177,543,def\n4\nwowie\ntoo\n76\n5\n(null)\n/* a comment */\n// another\ntest\n', ['wowie', 'too', '74'])
def test_errar(self):
src = r'''
#include <stdio.h>
#include <errno.h>
#include <string.h>
int main() {
char* err;
char buffer[200];
err = strerror(EDOM);
strerror_r(EWOULDBLOCK, buffer, 200);
printf("<%s>\n", err);
printf("<%s>\n", buffer);
printf("<%d>\n", strerror_r(EWOULDBLOCK, buffer, 0));
errno = 123;
printf("<%d>\n", errno);
return 0;
}
'''
expected = '''
<Numerical argument out of domain>
<Resource temporarily unavailable>
<34>
<123>
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_mainenv(self):
src = '''
#include <stdio.h>
int main(int argc, char **argv, char **envp)
{
printf("*%p*\\n", envp);
return 0;
}
'''
self.do_run(src, '*(nil)*')
def test_funcs(self):
src = '''
#include <stdio.h>
int funcy(int x)
{
return x*9;
}
int main()
{
printf("*%d,%d*\\n", funcy(8), funcy(10));
return 0;
}
'''
self.do_run(src, '*72,90*')
def test_structs(self):
src = '''
#include <stdio.h>
struct S
{
int x, y;
};
int main()
{
S a, b;
a.x = 5; a.y = 6;
b.x = 101; b.y = 7009;
S *c, *d;
c = &a;
c->x *= 2;
c = &b;
c->y -= 1;
d = c;
d->y += 10;
printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n", a.x, a.y, b.x, b.y, c->x, c->y, d->x, d->y);
return 0;
}
'''
self.do_run(src, '*10,6,101,7018,101,7018,101,7018*')
gen_struct_src = '''
#include <stdio.h>
#include <stdlib.h>
#include "emscripten.h"
struct S
{
int x, y;
};
int main()
{
S* a = {{gen_struct}};
a->x = 51; a->y = 62;
printf("*%d,%d*\\n", a->x, a->y);
{{del_struct}}(a);
return 0;
}
'''
def test_mallocstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')
def test_newstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')
def test_addr_of_stacked(self):
src = '''
#include <stdio.h>
void alter(int *y)
{
*y += 5;
}
int main()
{
int x = 2;
alter(&x);
printf("*%d*\\n", x);
return 0;
}
'''
self.do_run(src, '*7*')
def test_globals(self):
src = '''
#include <stdio.h>
char cache[256], *next = cache;
int main()
{
cache[10] = 25;
next[20] = 51;
printf("*%d,%d*\\n", next[10], cache[20]);
return 0;
}
'''
self.do_run(src, '*25,51*')
def test_linked_list(self):
src = '''
#include <stdio.h>
struct worker_args {
int value;
struct worker_args *next;
};
int main()
{
worker_args a;
worker_args b;
a.value = 60;
a.next = &b;
b.value = 900;
b.next = NULL;
worker_args* c = &a;
int total = 0;
while (c) {
total += c->value;
c = c->next;
}
// Chunk of em
worker_args chunk[10];
for (int i = 0; i < 9; i++) {
chunk[i].value = i*10;
chunk[i].next = &chunk[i+1];
}
chunk[9].value = 90;
chunk[9].next = &chunk[0];
c = chunk;
do {
total += c->value;
c = c->next;
} while (c != chunk);
printf("*%d,%d*\\n", total, b.next);
// NULL *is* 0, in C/C++. No JS null! (null == 0 is false, etc.)
return 0;
}
'''
self.do_run(src, '*1410,0*')
def test_sup(self):
src = '''
#include <stdio.h>
struct S4 { int x; }; // size: 4
struct S4_2 { short x, y; }; // size: 4, but for alignment purposes, 2
struct S6 { short x, y, z; }; // size: 6
struct S6w { char x[6]; }; // size: 6 also
struct S6z { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4
struct C___ { S6 a, b, c; int later; };
struct Carr { S6 a[3]; int later; }; // essentially the same, but differently defined
struct C__w { S6 a; S6w b; S6 c; int later; }; // same size, different struct
struct Cp1_ { int pre; short a; S6 b, c; int later; }; // fillers for a
struct Cp2_ { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler)
struct Cint { S6 a; int b; S6 c; int later; }; // An int (different size) for b
struct C4__ { S6 a; S4 b; S6 c; int later; }; // Same size as int from before, but a struct
struct C4_2 { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2
struct C__z { S6 a; S6z b; S6 c; int later; }; // different size, 8 instead of 6
int main()
{
#define TEST(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\
}
#define TEST_ARR(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]), (int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\
}
printf("sizeofs:%d,%d\\n", sizeof(S6), sizeof(S6z));
TEST(C___);
TEST_ARR(Carr);
TEST(C__w);
TEST(Cp1_);
TEST(Cp2_);
TEST(Cint);
TEST(C4__);
TEST(C4_2);
TEST(C__z);
return 1;
}
'''
if Settings.QUANTUM_SIZE == 1:
self.do_run(src, 'sizeofs:6,8\n*C___: 0,3,6,9<24*\n*Carr: 0,3,6,9<24*\n*C__w: 0,3,9,12<24*\n*Cp1_: 1,2,5,8<24*\n*Cp2_: 0,2,5,8<24*\n*Cint: 0,3,4,7<24*\n*C4__: 0,3,4,7<24*\n*C4_2: 0,3,5,8<20*\n*C__z: 0,3,5,8<28*')
else:
self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*')
def test_assert(self):
src = '''
#include <stdio.h>
#include <assert.h>
int main() {
assert(1 == true); // pass
assert(1 == false); // fail
return 1;
}
'''
self.do_run(src, 'Assertion failed: 1 == false')
def test_longjmp(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
static jmp_buf buf;
void second(void) {
printf("second\n"); // prints
longjmp(buf,1); // jumps back to where setjmp was called - making setjmp now return 1
}
void first(void) {
second();
printf("first\n"); // does not print
}
int main() {
int x = 0;
if ( ! setjmp(buf) ) {
x++;
first(); // when executed, setjmp returns 0
} else { // when longjmp jumps back, setjmp returns 1
printf("main: %d\n", x); // prints
}
return 0;
}
'''
# gcc -O0 and -O2 differ in what they do with the saved state of local vars - and we match that
if self.emcc_args is None or ('-O1' not in self.emcc_args and '-O2' not in self.emcc_args):
self.do_run(src, 'second\nmain: 1\n')
else:
self.do_run(src, 'second\nmain: 0\n')
def test_exceptions(self):
if Settings.QUANTUM_SIZE == 1: return self.skip("we don't support libcxx in q1")
self.banned_js_engines = [NODE_JS] # node issue 1669, exception causes stdout not to be flushed
Settings.DISABLE_EXCEPTION_CATCHING = 0
if self.emcc_args is None:
if Building.LLVM_OPTS: return self.skip('optimizing bitcode before emcc can confuse libcxx inclusion')
self.emcc_args = [] # libc++ auto-inclusion is only done if we use emcc
src = '''
#include <stdio.h>
void thrower() {
printf("infunc...");
throw(99);
printf("FAIL");
}
int main() {
try {
printf("*throw...");
throw(1);
printf("FAIL");
} catch(...) {
printf("caught!");
}
try {
thrower();
} catch(...) {
printf("done!*\\n");
}
return 1;
}
'''
self.do_run(src, '*throw...caught!infunc...done!*')
Settings.DISABLE_EXCEPTION_CATCHING = 1
self.do_run(src, 'Compiled code throwing an exception')
src = '''
#include <iostream>
class MyException
{
public:
MyException(){ std::cout << "Construct..."; }
MyException( const MyException & ) { std::cout << "Copy..."; }
~MyException(){ std::cout << "Destruct..."; }
};
int function()
{
std::cout << "Throw...";
throw MyException();
}
int function2()
{
return function();
}
int main()
{
try
{
function2();
}
catch (MyException & e)
{
std::cout << "Catched...";
}
try
{
function2();
}
catch (MyException e)
{
std::cout << "Catched...";
}
return 0;
}
'''
Settings.DISABLE_EXCEPTION_CATCHING = 0
self.do_run(src, 'Throw...Construct...Catched...Destruct...Throw...Construct...Copy...Catched...Destruct...Destruct...')
def test_uncaught_exception(self):
if self.emcc_args is None: return self.skip('no libcxx inclusion without emcc')
Settings.EXCEPTION_DEBUG = 0 # Messes up expected output.
Settings.DISABLE_EXCEPTION_CATCHING = 0
src = r'''
#include <stdio.h>
#include <exception>
struct X {
~X() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
};
int main() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
try {
X x;
throw 1;
} catch(...) {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
return 0;
}
'''
self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n')
src = r'''
#include <fstream>
#include <iostream>
int main() {
std::ofstream os("test");
os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from
// std::basic_ostream::sentry::~sentry
std::cout << "success";
}
'''
self.do_run(src, 'success')
def test_typed_exceptions(self):
Settings.DISABLE_EXCEPTION_CATCHING = 0
Settings.SAFE_HEAP = 0 # Throwing null will cause an ignorable null pointer access.
Settings.EXCEPTION_DEBUG = 0 # Messes up expected output.
src = open(path_from_root('tests', 'exceptions', 'typed.cpp'), 'r').read()
expected = open(path_from_root('tests', 'exceptions', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_class(self):
src = '''
#include <stdio.h>
struct Random {
enum { IM = 139968, IA = 3877, IC = 29573 };
Random() : last(42) {}
float get( float max = 1.0f ) {
last = ( last * IA + IC ) % IM;
return max * last / IM;
}
protected:
unsigned int last;
} rng1;
int main()
{
Random rng2;
int count = 0;
for (int i = 0; i < 100; i++) {
float x1 = rng1.get();
float x2 = rng2.get();
printf("%f, %f\\n", x1, x2);
if (x1 != x2) count += 1;
}
printf("*%d*\\n", count);
return 0;
}
'''
self.do_run(src, '*0*')
def test_inherit(self):
src = '''
#include <stdio.h>
struct Parent {
int x1, x2;
};
struct Child : Parent {
int y;
};
int main()
{
Parent a;
a.x1 = 50;
a.x2 = 87;
Child b;
b.x1 = 78;
b.x2 = 550;
b.y = 101;
Child* c = (Child*)&a;
c->x1 ++;
c = &b;
c->y --;
printf("*%d,%d,%d,%d,%d,%d,%d*\\n", a.x1, a.x2, b.x1, b.x2, b.y, c->x1, c->x2);
return 0;
}
'''
self.do_run(src, '*51,87,78,550,100,78,550*')
def test_isdigit_l(self):
if self.emcc_args is None: return self.skip('no libcxx inclusion without emcc')
src = '''
#include <iostream>
int main() {
using namespace std;
use_facet<num_put<char> >(cout.getloc()).put(cout, cout, '0', 3.14159265);
}
'''
self.do_run(src, '3.14159')
def test_polymorph(self):
src = '''
#include <stdio.h>
struct Pure {
virtual int implme() = 0;
};
struct Parent : Pure {
virtual int getit() { return 11; };
int implme() { return 32; }
};
struct Child : Parent {
int getit() { return 74; }
int implme() { return 1012; }
};
struct Other {
int one() { return 11; }
int two() { return 22; }
};
int main()
{
Parent *x = new Parent();
Parent *y = new Child();
printf("*%d,%d,%d,%d*\\n", x->getit(), y->getit(), x->implme(), y->implme());
Other *o = new Other;
int (Other::*Ls)() = &Other::one;
printf("*%d*\\n", (o->*(Ls))());
Ls = &Other::two;
printf("*%d*\\n", (o->*(Ls))());
return 0;
}
'''
self.do_run(src, '*11,74,32,1012*\n*11*\n*22*')
def test_dynamic_cast(self):
if self.emcc_args is None: return self.skip('need libcxxabi')
src = r'''
#include <stdio.h>
struct Support {
virtual void f() {
printf("f()\n");
}
};
struct Derived : Support {
};
int main() {
Support * p = new Derived;
dynamic_cast<Derived*>(p)->f();
}
'''
self.do_run(src, 'f()\n')
src = '''
#include <stdio.h>
class CBase { virtual void dummy() {} };
class CDerived : public CBase { int a; };
class CDerivedest : public CDerived { float b; };
int main ()
{
CBase *pa = new CBase;
CBase *pb = new CDerived;
CBase *pc = new CDerivedest;
printf("a1: %d\\n", dynamic_cast<CDerivedest*>(pa) != NULL);
printf("a2: %d\\n", dynamic_cast<CDerived*>(pa) != NULL);
printf("a3: %d\\n", dynamic_cast<CBase*>(pa) != NULL);
printf("b1: %d\\n", dynamic_cast<CDerivedest*>(pb) != NULL);
printf("b2: %d\\n", dynamic_cast<CDerived*>(pb) != NULL);
printf("b3: %d\\n", dynamic_cast<CBase*>(pb) != NULL);
printf("c1: %d\\n", dynamic_cast<CDerivedest*>(pc) != NULL);
printf("c2: %d\\n", dynamic_cast<CDerived*>(pc) != NULL);
printf("c3: %d\\n", dynamic_cast<CBase*>(pc) != NULL);
return 0;
}
'''
self.do_run(src, 'a1: 0\na2: 0\na3: 1\nb1: 0\nb2: 1\nb3: 1\nc1: 1\nc2: 1\nc3: 1\n')
def test_funcptr(self):
src = '''
#include <stdio.h>
int calc1() { return 26; }
int calc2() { return 90; }
typedef int (*fp_t)();
fp_t globally1 = calc1;
fp_t globally2 = calc2;
int nothing(const char *str) { return 0; }
int main()
{
fp_t fp = calc1;
void *vp = (void*)fp;
fp_t fpb = (fp_t)vp;
fp_t fp2 = calc2;
void *vp2 = (void*)fp2;
fp_t fpb2 = (fp_t)vp2;
printf("*%d,%d,%d,%d,%d,%d*\\n", fp(), fpb(), fp2(), fpb2(), globally1(), globally2());
fp_t t = calc1;
printf("*%d,%d", t == calc1, t == calc2);
t = calc2;
printf(",%d,%d*\\n", t == calc1, t == calc2);
int (*other)(const char *str);
other = nothing;
other("*hello!*");
other = puts;
other("*goodbye!*");
return 0;
}
'''
self.do_run(src, '*26,26,90,90,26,90*\n*1,0,0,1*\n*goodbye!*')
def test_mathfuncptr(self):
src = '''
#include <math.h>
#include <stdio.h>
int
main(void) {
float (*fn)(float) = &sqrtf;
float (*fn2)(float) = &fabsf;
printf("fn2(-5) = %d, fn(10) = %f\\n", (int)fn2(-5), fn(10));
return 0;
}
'''
self.do_run(src, 'fn2(-5) = 5, fn(10) = 3.16')
def test_emptyclass(self):
src = '''
#include <stdio.h>
struct Randomized {
Randomized(int x) {
printf("*zzcheezzz*\\n");
}
};
int main( int argc, const char *argv[] ) {
new Randomized(55);
return 0;
}
'''
self.do_run(src, '*zzcheezzz*')
def test_alloca(self):
src = '''
#include <stdio.h>
#include <stdlib.h>
int main() {
char *pc;
pc = (char *)alloca(5);
printf("z:%d*%d*\\n", pc > 0, (int)pc);
return 0;
}
'''
self.do_run(src, 'z:1*', force_c=True)
def test_alloca_stack(self):
if self.emcc_args is None: return # too slow in other modes
# We should not blow up the stack with numerous allocas
src = '''
#include <stdio.h>
#include <stdlib.h>
func(int i) {
char *pc = (char *)alloca(100);
*pc = i;
(*pc)++;
return (*pc) % 10;
}
int main() {
int total = 0;
for (int i = 0; i < 1024*1024; i++)
total += func(i);
printf("ok:%d*\\n", total);
return 0;
}
'''
self.do_run(src, 'ok:-32768*', force_c=True)
def test_stack_byval(self):
if self.emcc_args is None: return # too slow in other modes
# We should also not blow up the stack with byval arguments
src = r'''
#include<stdio.h>
struct vec {
int x, y, z;
vec(int x_, int y_, int z_) : x(x_), y(y_), z(z_) {}
static vec add(vec a, vec b) {
return vec(a.x+b.x, a.y+b.y, a.z+b.z);
}
};
int main() {
int total = 0;
for (int i = 0; i < 1000; i++) {
for (int j = 0; j < 1000; j++) {
vec c(i+i%10, j*2, i%255);
vec d(j*2, j%255, i%120);
vec f = vec::add(c, d);
total += (f.x + f.y + f.z) % 100;
total %= 10240;
}
}
printf("sum:%d*\n", total);
return 1;
}
'''
self.do_run(src, 'sum:9780*')
def test_stack_varargs(self):
if self.emcc_args is None: return # too slow in other modes
# We should not blow up the stack with numerous varargs
src = r'''
#include <stdio.h>
#include <stdlib.h>
void func(int i) {
printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
}
int main() {
for (int i = 0; i < 1024; i++)
func(i);
printf("ok!\n");
return 0;
}
'''
Settings.TOTAL_STACK = 1024
self.do_run(src, 'ok!')
def test_array2(self):
src = '''
#include <stdio.h>
static const double grid[4][2] = {
{-3/3.,-1/3.},{+1/3.,-3/3.},
{-1/3.,+3/3.},{+3/3.,+1/3.}
};
int main() {
for (int i = 0; i < 4; i++)
printf("%d:%.2f,%.2f ", i, grid[i][0], grid[i][1]);
printf("\\n");
return 0;
}
'''
self.do_run(src, '0:-1.00,-0.33 1:0.33,-1.00 2:-0.33,1.00 3:1.00,0.33')
def test_array2b(self):
src = '''
#include <stdio.h>
static const struct {
unsigned char left;
unsigned char right;
} prioritah[] = {
{6, 6}, {6, 6}, {7, 95}, {7, 7}
};
int main() {
printf("*%d,%d\\n", prioritah[1].left, prioritah[1].right);
printf("%d,%d*\\n", prioritah[2].left, prioritah[2].right);
return 0;
}
'''
self.do_run(src, '*6,6\n7,95*')
def test_constglobalstructs(self):
src = '''
#include <stdio.h>
struct IUB {
int c;
double p;
unsigned int pi;
};
IUB iub[] = {
{ 'a', 0.27, 5 },
{ 'c', 0.15, 4 },
{ 'g', 0.12, 3 },
{ 't', 0.27, 2 },
};
const unsigned char faceedgesidx[6][4] =
{
{ 4, 5, 8, 10 },
{ 6, 7, 9, 11 },
{ 0, 2, 8, 9 },
{ 1, 3, 10,11 },
{ 0, 1, 4, 6 },
{ 2, 3, 5, 7 },
};
int main( int argc, const char *argv[] ) {
printf("*%d,%d,%d,%d*\\n", iub[0].c, int(iub[1].p*100), iub[2].pi, faceedgesidx[3][2]);
return 0;
}
'''
self.do_run(src, '*97,15,3,10*')
def test_conststructs(self):
src = '''
#include <stdio.h>
struct IUB {
int c;
double p;
unsigned int pi;
};
int main( int argc, const char *argv[] ) {
int before = 70;
IUB iub[] = {
{ 'a', 0.3029549426680, 5 },
{ 'c', 0.15, 4 },
{ 'g', 0.12, 3 },
{ 't', 0.27, 2 },
};
int after = 90;
printf("*%d,%d,%d,%d,%d,%d*\\n", before, iub[0].c, int(iub[1].p*100), iub[2].pi, int(iub[0].p*10000), after);
return 0;
}
'''
self.do_run(src, '*70,97,15,3,3029,90*')
def test_mod_globalstruct(self):
src = '''
#include <stdio.h>
struct malloc_params {
size_t magic, page_size;
};
malloc_params mparams;
#define SIZE_T_ONE ((size_t)1)
#define page_align(S) (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
int main()
{
mparams.page_size = 4096;
printf("*%d,%d,%d,%d*\\n", mparams.page_size, page_align(1000), page_align(6000), page_align(66474));
return 0;
}
'''
self.do_run(src, '*4096,4096,8192,69632*')
def test_pystruct(self):
src = '''
#include <stdio.h>
// Based on CPython code
union PyGC_Head {
struct {
union PyGC_Head *gc_next;
union PyGC_Head *gc_prev;
size_t gc_refs;
} gc;
long double dummy; /* force worst-case alignment */
} ;
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)
/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
/* PyGC_Head, threshold, count */
{{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0},
{{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0},
{{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0},
};
int main()
{
gc_generation *n = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n",
(int)(&n[0]),
(int)(&n[0].head),
(int)(&n[0].head.gc.gc_next),
(int)(&n[0].head.gc.gc_prev),
(int)(&n[0].head.gc.gc_refs),
(int)(&n[0].threshold), (int)(&n[0].count), (int)(&n[1])
);
printf("*%d,%d,%d*\\n",
(int)(&generations[0]) ==
(int)(&generations[0].head.gc.gc_next),
(int)(&generations[0]) ==
(int)(&generations[0].head.gc.gc_prev),
(int)(&generations[0]) ==
(int)(&generations[1])
);
int x1 = (int)(&generations[0]);
int x2 = (int)(&generations[1]);
printf("*%d*\\n", x1 == x2);
for (int i = 0; i < NUM_GENERATIONS; i++) {
PyGC_Head *list = GEN_HEAD(i);
printf("%d:%d,%d\\n", i, (int)list == (int)(list->gc.gc_prev), (int)list ==(int)(list->gc.gc_next));
}
printf("*%d,%d,%d*\\n", sizeof(PyGC_Head), sizeof(gc_generation), int(GEN_HEAD(2)) - int(GEN_HEAD(1)));
}
'''
if Settings.QUANTUM_SIZE == 1:
# Compressed memory. Note that sizeof() does give the fat sizes, however!
self.do_run(src, '*0,0,0,1,2,3,4,5*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*12,20,5*')
else:
self.do_run(src, '*0,0,0,4,8,12,16,20*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*12,20,20*')
def test_ptrtoint(self):
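# Pointer arithmetic via integer casts; the flagged casts below should trigger compiler warnings (4 in total).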
src = '''
#include <stdio.h>
int main( int argc, const char *argv[] ) {
char *a = new char[10];
char *a0 = a+0;
char *a5 = a+5;
int *b = new int[10];
int *b0 = b+0;
int *b5 = b+5;
int c = (int)b5-(int)b0; // Emscripten should warn!
int d = (int)b5-(int)b0; // Emscripten should warn!
printf("*%d*\\n", (int)a5-(int)a0);
return 0;
}
'''
runner = self
def check_warnings(output):
runner.assertEquals(len([line for line in output.split('\n') if 'Warning' in line]), 4)
self.do_run(src, '*5*', output_processor=check_warnings)
def test_sizeof(self):
# Has invalid writes between printouts
Settings.SAFE_HEAP = 0
src = '''
#include <stdio.h>
#include <string.h>
#include "emscripten.h"
struct A { int x, y; };
int main( int argc, const char *argv[] ) {
int *a = new int[10];
int *b = new int[1];
int *c = new int[10];
for (int i = 0; i < 10; i++)
a[i] = 2;
*b = 5;
for (int i = 0; i < 10; i++)
c[i] = 8;
printf("*%d,%d,%d,%d,%d*\\n", a[0], a[9], *b, c[0], c[9]);
// Should overwrite a, but not touch b!
memcpy(a, c, 10*sizeof(int));
printf("*%d,%d,%d,%d,%d*\\n", a[0], a[9], *b, c[0], c[9]);
// Part 2
A as[3] = { { 5, 12 }, { 6, 990 }, { 7, 2 } };
memcpy(&as[0], &as[2], sizeof(A));
printf("*%d,%d,%d,%d,%d,%d*\\n", as[0].x, as[0].y, as[1].x, as[1].y, as[2].x, as[2].y);
return 0;
}
'''
self.do_run(src, '*2,2,5,8,8***8,8,5,8,8***7,2,6,990,7,2*', [], lambda x: x.replace('\n', '*'))
def test_emscripten_api(self):
#if Settings.MICRO_OPTS or Settings.RELOOP or Building.LLVM_OPTS: return self.skip('FIXME')
src = r'''
#include <stdio.h>
#include "emscripten.h"
int main() {
// EMSCRIPTEN_COMMENT("hello from the source");
emscripten_run_script("Module.print('hello world' + '!')");
printf("*%d*\n", emscripten_run_script_int("5*20"));
return 0;
}
'''
check = '''
def process(filename):
src = open(filename, 'r').read()
# TODO: restore this (see comment in emscripten.h) assert '// hello from the source' in src
'''
self.do_run(src, 'hello world!\n*100*', post_build=check)
def test_inlinejs(self):
src = r'''
#include <stdio.h>
int main() {
asm("Module.print('Inline JS is very cool')");
return 0;
}
'''
self.do_run(src, 'Inline JS is very cool')
def test_memorygrowth(self):
# With typed arrays in particular, it is dangerous to use more memory than TOTAL_MEMORY,
# since we then need to enlarge the heap(s).
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "emscripten.h"
int main()
{
char *buf1 = (char*)malloc(100);
char *data1 = "hello";
memcpy(buf1, data1, strlen(data1)+1);
float *buf2 = (float*)malloc(100);
float pie = 4.955;
memcpy(buf2, &pie, sizeof(float));
printf("*pre: %s,%.3f*\n", buf1, buf2[0]);
int totalMemory = emscripten_run_script_int("TOTAL_MEMORY");
char *buf3 = (char*)malloc(totalMemory+1);
char *buf4 = (char*)malloc(100);
float *buf5 = (float*)malloc(100);
//printf("totalMemory: %d bufs: %d,%d,%d,%d,%d\n", totalMemory, buf1, buf2, buf3, buf4, buf5);
assert((int)buf4 > (int)totalMemory && (int)buf5 > (int)totalMemory);
printf("*%s,%.3f*\n", buf1, buf2[0]); // the old heap data should still be there
memcpy(buf4, buf1, strlen(data1)+1);
memcpy(buf5, buf2, sizeof(float));
printf("*%s,%.3f*\n", buf4, buf5[0]); // and the new heap space should work too
return 0;
}
'''
self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_ssr(self): # struct self-ref
src = '''
#include <stdio.h>
// see related things in openjpeg
typedef struct opj_mqc_state {
unsigned int qeval;
int mps;
struct opj_mqc_state *nmps;
struct opj_mqc_state *nlps;
} opj_mqc_state_t;
static opj_mqc_state_t mqc_states[2] = {
{0x5600, 0, &mqc_states[2], &mqc_states[3]},
{0x5602, 1, &mqc_states[3], &mqc_states[2]},
};
int main() {
printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states);
for (int i = 0; i < 2; i++)
printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps,
(int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states);
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
self.do_run(src, '''*4*\n0:22016,0,8,12\n1:22018,1,12,8\n''')
else:
self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''')
def test_tinyfuncstr(self):
src = '''
#include <stdio.h>
struct Class {
static char *name1() { return "nameA"; }
char *name2() { return "nameB"; }
};
int main() {
printf("*%s,%s*\\n", Class::name1(), (new Class())->name2());
return 0;
}
'''
self.do_run(src, '*nameA,nameB*')
def test_llvmswitch(self):
Settings.CORRECT_SIGNS = 1
src = '''
#include <stdio.h>
#include <string.h>
int switcher(int p)
{
switch(p) {
case 'a':
case 'b':
case 'c':
return p-1;
case 0xfffffff1:
return p+1;
}
return p;
}
int main( int argc, const char *argv[] ) {
unsigned int x = 0xfffffff1;
x >>= 0; // force it to be unsigned for purpose of checking our switch comparison in signed/unsigned
printf("*%d,%d,%d,%d,%d,%d*\\n", switcher('a'), switcher('b'), switcher('c'), switcher(x), switcher(-15), switcher('e'));
return 0;
}
'''
self.do_run(src, '*96,97,98,-14,-14,101*')
def test_indirectbr(self):
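# Computed goto (GCC's labels-as-values) lowers to LLVM's |indirectbr| instruction.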
src = '''
#include <stdio.h>
int main(void) {
const void *addrs[2] = { &&FOO, &&BAR };
// confuse the optimizer so it doesn't hardcode the jump, which would avoid generating an |indirectbr| instruction
int which = 0;
for (int x = 0; x < 1000; x++) which = (which + x*x) % 7;
which = (which % 2) + 1;
goto *addrs[which];
FOO:
printf("bad\\n");
return 1;
BAR:
printf("good\\n");
const void *addr = &&FOO;
goto *addr;
}
'''
self.do_run(src, 'good\nbad')
def test_pack(self):
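# #pragma pack(push,1) removes padding, so header must be smaller and more tightly packed than the identically-declared fatheader.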
src = '''
#include <stdio.h>
#include <string.h>
#pragma pack(push,1)
typedef struct header
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} header;
#pragma pack(pop)
typedef struct fatheader
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} fatheader;
int main( int argc, const char *argv[] ) {
header h, *ph = 0;
fatheader fh, *pfh = 0;
printf("*%d,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0]));
printf("*%d,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0]));
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
self.do_run(src, '*4,2,3*\n*6,2,3*')
else:
self.do_run(src, '*4,3,4*\n*6,4,6*')
def test_varargs(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('FIXME: Add support for this')
src = '''
#include <stdio.h>
#include <stdarg.h>
void vary(const char *s, ...)
{
va_list v;
va_start(v, s);
char d[20];
vsnprintf(d, 20, s, v);
puts(d);
// Try it with copying
va_list tempva;
__va_copy(tempva, v);
vsnprintf(d, 20, s, tempva);
puts(d);
va_end(v);
}
void vary2(char color, const char *s, ...)
{
va_list v;
va_start(v, s);
char d[21];
d[0] = color;
vsnprintf(d+1, 20, s, v);
puts(d);
va_end(v);
}
#define GETMAX(pref, type) \
type getMax##pref(int num, ...) \
{ \
va_list vv; \
va_start(vv, num); \
type maxx = va_arg(vv, type); \
for (int i = 1; i < num; i++) \
{ \
type curr = va_arg(vv, type); \
maxx = curr > maxx ? curr : maxx; \
} \
va_end(vv); \
return maxx; \
}
GETMAX(i, int);
GETMAX(D, double);
int main() {
vary("*cheez: %d+%d*", 0, 24); // Also tests that '0' is not special as an array ender
vary("*albeit*"); // Should not fail with no var args in vararg function
vary2('Q', "%d*", 85);
int maxxi = getMaxi(6, 2, 5, 21, 4, -10, 19);
printf("maxxi:%d*\\n", maxxi);
double maxxD = getMaxD(6, (double)2.1, (double)5.1, (double)22.1, (double)4.1, (double)-10.1, (double)19.1);
printf("maxxD:%.2f*\\n", (float)maxxD);
// And, as a function pointer
void (*vfp)(const char *s, ...) = vary;
vfp("*vfp:%d,%d*", 22, 199);
return 0;
}
'''
self.do_run(src, '*cheez: 0+24*\n*cheez: 0+24*\n*albeit*\n*albeit*\nQ85*\nmaxxi:21*\nmaxxD:22.10*\n*vfp:22,199*\n*vfp:22,199*\n')
def test_structbyval(self):
# part 1: make sure that normally, passing structs by value works
src = r'''
#include <stdio.h>
struct point
{
int x, y;
};
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
void dumpmod(struct point *p) {
p->x++; // should not modify
p->y++; // anything in the caller!
printf("dump: %d,%d\n", p->x, p->y);
}
int main( int argc, const char *argv[] ) {
point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
dumpmod(&p);
dumpmod(&p);
printf("last: %d,%d\n", p.x, p.y);
return 0;
}
'''
self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4')
# Check for lack of warning in the generated code (they should appear in part 2)
generated = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
assert 'Casting a function pointer type to another with a different number of arguments.' not in generated, 'Unexpected warning'
# part 2: make sure we warn about mixing c and c++ calling conventions here
if not (self.emcc_args is None or self.emcc_args == []): return # Optimized code is missing the warning comments
header = r'''
struct point
{
int x, y;
};
'''
open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header)
supp = r'''
#include <stdio.h>
#include "header.h"
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
'''
supp_name = os.path.join(self.get_dir(), 'supp.c')
open(supp_name, 'w').write(supp)
main = r'''
#include <stdio.h>
#include "header.h"
#ifdef __cplusplus
extern "C" {
#endif
void dump(struct point p);
#ifdef __cplusplus
}
#endif
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(struct point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.cpp')
open(main_name, 'w').write(main)
Building.emcc(supp_name)
Building.emcc(main_name)
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([supp_name + '.o', main_name + '.o'], all_name)
# This will fail! See explanation near the warning we check for, in the compiler source code
output = Popen(['python', EMCC, all_name], stderr=PIPE).communicate()
# Check for the warning in the compiler's stderr output
assert 'Casting a function pointer type to another with a different number of arguments.' in output[1], 'Missing expected warning'
def test_stdlibs(self):
if Settings.USE_TYPED_ARRAYS == 2:
# Typed arrays = 2 + safe heap prints a warning that messes up our output.
Settings.SAFE_HEAP = 0
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
void clean()
{
printf("*cleaned*\\n");
}
int comparer(const void *a, const void *b) {
int aa = *((int*)a);
int bb = *((int*)b);
return aa - bb;
}
int main() {
// timeofday
timeval t;
gettimeofday(&t, NULL);
printf("*%d,%d\\n", int(t.tv_sec), int(t.tv_usec)); // should not crash
// atexit
atexit(clean);
// qsort
int values[6] = { 3, 2, 5, 1, 5, 6 };
qsort(values, 5, sizeof(int), comparer); // note: only the first 5 of the 6 elements are sorted
printf("*%d,%d,%d,%d,%d,%d*\\n", values[0], values[1], values[2], values[3], values[4], values[5]);
printf("*stdin==0:%d*\\n", stdin == 0); // check that external values are at least not NULL
printf("*%%*\\n");
printf("*%.1ld*\\n", 5);
printf("*%.1f*\\n", strtod("66", NULL)); // checks dependency system, as our strtod needs _isspace etc.
printf("*%ld*\\n", strtol("10", NULL, 0));
printf("*%ld*\\n", strtol("0", NULL, 0));
printf("*%ld*\\n", strtol("-10", NULL, 0));
printf("*%ld*\\n", strtol("12", NULL, 16));
printf("*%lu*\\n", strtoul("10", NULL, 0));
printf("*%lu*\\n", strtoul("0", NULL, 0));
printf("*%lu*\\n", strtoul("-10", NULL, 0));
printf("*malloc(0)!=0:%d*\\n", malloc(0) != 0); // We should not fail horribly
return 0;
}
'''
self.do_run(src, '*1,2,3,5,5,6*\n*stdin==0:0*\n*%*\n*5*\n*66.0*\n*10*\n*0*\n*-10*\n*18*\n*10*\n*0*\n*4294967286*\n*malloc(0)!=0:1*\n*cleaned*')
src = r'''
#include <stdio.h>
#include <stdbool.h>
int main() {
bool x = true;
bool y = false;
printf("*%d*\n", x != y);
return 0;
}
'''
self.do_run(src, '*1*', force_c=True)
def test_atexit(self):
# Confirms they are called in reverse order
src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanA() {
printf("A");
}
static void cleanB() {
printf("B");
}
int main() {
atexit(cleanA);
atexit(cleanB);
return 0;
}
'''
self.do_run(src, 'BA')
def test_time(self):
# XXX Not sure what the right output is here. Looks like the test started failing with daylight savings changes. Modified it to pass again.
src = open(path_from_root('tests', 'time', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'time', 'output.txt'), 'r').read()
self.do_run(src, expected,
extra_emscripten_args=['-H', 'libc/time.h'])
#extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
def test_statics(self):
# static initializers save i16 but load i8 for some reason
if Settings.SAFE_HEAP:
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ['src.cpp:19', 'src.cpp:26']
src = '''
#include <stdio.h>
#include <string.h>
#define CONSTRLEN 32
void conoutfv(const char *fmt)
{
static char buf[CONSTRLEN];
strcpy(buf, fmt);
puts(buf);
}
struct XYZ {
float x, y, z;
XYZ(float a, float b, float c) : x(a), y(b), z(c) { }
static const XYZ& getIdentity()
{
static XYZ iT(1,2,3);
return iT;
}
};
struct S {
static const XYZ& getIdentity()
{
static const XYZ iT(XYZ::getIdentity());
return iT;
}
};
int main() {
conoutfv("*staticccz*");
printf("*%.2f,%.2f,%.2f*\\n", S::getIdentity().x, S::getIdentity().y, S::getIdentity().z);
return 0;
}
'''
self.do_run(src, '*staticccz*\n*1.00,2.00,3.00*')
def test_copyop(self):
# clang generated code is vulnerable to this, as it uses
# memcpy for assignments, with hardcoded numbers of bytes
# (llvm-gcc copies items one by one). See QUANTUM_SIZE in
# settings.js.
src = '''
#include <stdio.h>
#include <math.h>
#include <string.h>
struct vec {
double x,y,z;
vec() : x(0), y(0), z(0) { };
vec(const double a, const double b, const double c) : x(a), y(b), z(c) { };
};
struct basis {
vec a, b, c;
basis(const vec& v) {
a=v; // should not touch b!
printf("*%.2f,%.2f,%.2f*\\n", b.x, b.y, b.z);
}
};
int main() {
basis B(vec(1,0,0));
// Part 2: similar problem with memset and memmove
int x = 1, y = 77, z = 2;
memset((void*)&x, 0, sizeof(int));
memset((void*)&z, 0, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
memcpy((void*)&x, (void*)&z, sizeof(int));
memcpy((void*)&z, (void*)&x, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
memmove((void*)&x, (void*)&z, sizeof(int));
memmove((void*)&z, (void*)&x, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
return 0;
}
'''
self.do_run(src, '*0.00,0.00,0.00*\n*0,77,0*\n*0,77,0*\n*0,77,0*')
def test_memcpy(self):
src = '''
#include <stdio.h>
#include <string.h>
#define MAXX 48
void reset(unsigned char *buffer) {
for (int i = 0; i < MAXX; i++) buffer[i] = i+1;
}
void dump(unsigned char *buffer) {
for (int i = 0; i < MAXX-1; i++) printf("%2d,", buffer[i]);
printf("%d\\n", buffer[MAXX-1]);
}
int main() {
unsigned char buffer[MAXX];
for (int i = MAXX/4; i < MAXX-MAXX/4; i++) {
for (int j = MAXX/4; j < MAXX-MAXX/4; j++) {
for (int k = 1; k < MAXX/4; k++) {
if (i == j) continue;
if (i < j && i+k > j) continue;
if (j < i && j+k > i) continue;
printf("[%d,%d,%d] ", i, j, k);
reset(buffer);
memcpy(buffer+i, buffer+j, k);
dump(buffer);
}
}
}
return 0;
}
'''
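# The output is huge, so hash it and compare SHA-1 digests instead of the raw text.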
def check(result):
return hashlib.sha1(result).hexdigest()
self.do_run(src, '6c9cdfe937383b79e52ca7a2cce83a21d9f5422c',
output_nicerizer = check)
def test_memmove(self):
src = '''
#include <stdio.h>
#include <string.h>
int main() {
char str[] = "memmove can be very useful....!";
memmove (str+20, str+15, 11);
puts(str);
return 0;
}
'''
self.do_run(src, 'memmove can be very very useful')
def test_bsearch(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('Test cannot work with q1')
src = '''
#include <stdlib.h>
#include <stdio.h>
int cmp(const void* key, const void* member) {
return *(int *)key - *(int *)member;
}
void printResult(int* needle, int* haystack, unsigned int len) {
void *result = bsearch(needle, haystack, len, sizeof(unsigned int), cmp);
if (result == NULL) {
printf("null\\n");
} else {
printf("%d\\n", *(unsigned int *)result);
}
}
int main() {
int a[] = { -2, -1, 0, 6, 7, 9 };
int b[] = { 0, 1 };
/* Find all keys that exist. */
for(int i = 0; i < 6; i++) {
int val = a[i];
printResult(&val, a, 6);
}
/* Keys that are covered by the range of the array but aren't in
* the array cannot be found.
*/
int v1 = 3;
int v2 = 8;
printResult(&v1, a, 6);
printResult(&v2, a, 6);
/* Keys outside the range of the array cannot be found. */
int v3 = -1;
int v4 = 2;
printResult(&v3, b, 2);
printResult(&v4, b, 2);
return 0;
}
'''
self.do_run(src, '-2\n-1\n0\n6\n7\n9\nnull\nnull\nnull\nnull')
def test_nestedstructs(self):
src = '''
#include <stdio.h>
#include "emscripten.h"
struct base {
int x;
float y;
union {
int a;
float b;
};
char c;
};
struct hashtableentry {
int key;
base data;
};
struct hashset {
typedef hashtableentry entry;
struct chain { entry elem; chain *next; };
// struct chainchunk { chain chains[100]; chainchunk *next; };
};
struct hashtable : hashset {
hashtable() {
base *b = NULL;
entry *e = NULL;
chain *c = NULL;
printf("*%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n",
sizeof(base),
int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)),
sizeof(hashtableentry),
int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)),
sizeof(hashset::chain),
int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c))
);
}
};
struct B { char buffer[62]; int last; char laster; char laster2; };
struct Bits {
unsigned short A : 1;
unsigned short B : 1;
unsigned short C : 1;
unsigned short D : 1;
unsigned short x1 : 1;
unsigned short x2 : 1;
unsigned short x3 : 1;
unsigned short x4 : 1;
};
int main() {
hashtable t;
// Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next
// one is aligned properly. Also handle char; char; etc. properly.
B *b = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])),
int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B));
// Part 3 - bitfields, and small structures
Bits *b2 = NULL;
printf("*%d*\\n", sizeof(Bits));
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
# Compressed memory. Note that sizeof() does give the fat sizes, however!
self.do_run(src, '*16,0,1,2,2,3|20,0,1,1,2,3,3,4|24,0,5,0,1,1,2,3,3,4*\n*0,0,0,1,2,62,63,64,72*\n*2*')
else:
# Bloated memory; same layout as C/C++
self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*')
def test_runtimelink(self):
if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize printf into puts in the parent, and the child will still look for puts')
Settings.LINKABLE = 1
self.banned_js_engines = [NODE_JS] # node's global scope behaves differently than everything else, needs investigation FIXME
header = r'''
struct point
{
int x, y;
};
'''
open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header)
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x+p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
supp_name = os.path.join(self.get_dir(), 'supp.c')
open(supp_name, 'w').write(supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 2
dirname = self.get_dir()
self.build(supp, dirname, supp_name)
shutil.move(supp_name + '.o.js', os.path.join(dirname, 'liblib.so'))
Settings.BUILD_AS_SHARED_LIB = 0
Settings.RUNTIME_LINKED_LIBS = ['liblib.so']
self.do_run(main, 'supp: 54,2\nmain: 56\nsupp see: 543\nmain see: 76\nok.')
def test_dlfcn_basic(self):
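# Global constructors: the main module's must run at startup, the library's only when dlopen() loads it.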
Settings.LINKABLE = 1
lib_src = '''
#include <cstdio>
class Foo {
public:
Foo() {
printf("Constructing lib object.\\n");
}
};
Foo global;
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = '''
#include <cstdio>
#include <dlfcn.h>
class Bar {
public:
Bar() {
printf("Constructing main object.\\n");
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\n',
post_build=add_pre_run_and_checks)
def test_dlfcn_qsort(self):
Settings.LINKABLE = 1
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1 # Needed for unsafe optimizations
lib_src = '''
int lib_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a > *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
typedef int (*CMP_TYPE)(const void*, const void*);
extern "C" CMP_TYPE get_cmp() {
return lib_cmp;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_get_cmp']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*CMP_TYPE)(const void*, const void*);
int main_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a < *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
int main() {
void* lib_handle;
CMP_TYPE (*getter_ptr)();
CMP_TYPE lib_cmp_ptr;
int arr[5] = {4, 2, 5, 1, 3};
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp");
if (getter_ptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
lib_cmp_ptr = getter_ptr();
qsort((void*)arr, 5, sizeof(int), main_cmp);
printf("Sort with main comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr);
printf("Sort with lib comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.EXPORTED_FUNCTIONS = ['_main']
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *',
output_nicerizer=lambda x: x.replace('\n', '*'),
post_build=add_pre_run_and_checks)
def test_dlfcn_data_and_fptr(self):
if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize out parent_func')
Settings.LINKABLE = 1
lib_src = '''
#include <stdio.h>
int global = 42;
extern void parent_func(); // a function that is defined in the parent
void lib_fptr() {
printf("Second calling lib_fptr from main.\\n");
parent_func();
// call it also through a pointer, to check indexizing
void (*p_f)();
p_f = parent_func;
p_f();
}
extern "C" void (*func(int x, void(*fptr)()))() {
printf("In func: %d\\n", x);
fptr();
return lib_fptr;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_func']
Settings.EXPORTED_GLOBALS = ['_global']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = '''
#include <stdio.h>
#include <dlfcn.h>
typedef void (*FUNCTYPE(int, void(*)()))();
FUNCTYPE func;
void parent_func() {
printf("parent_func called from child\\n");
}
void main_fptr() {
printf("First calling main_fptr from lib.\\n");
}
int main() {
void* lib_handle;
FUNCTYPE* func_fptr;
// Test basic lib loading.
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
// Test looked up function.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
// Load twice to test cache.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
if (func_fptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
// Test passing function pointers across module bounds.
void (*fptr)() = func_fptr(13, main_fptr);
fptr();
// Test global data.
int* global = (int*) dlsym(lib_handle, "global");
if (global == NULL) {
printf("Could not find global.\\n");
return 1;
}
printf("Var: %d\\n", *global);
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.EXPORTED_FUNCTIONS = ['_main']
Settings.EXPORTED_GLOBALS = []
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'In func: 13*First calling main_fptr from lib.*Second calling lib_fptr from main.*parent_func called from child*parent_func called from child*Var: 42*',
output_nicerizer=lambda x: x.replace('\n', '*'),
post_build=add_pre_run_and_checks)
def test_dlfcn_alias(self):
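# A global defined in the main module (parent_global) must be visible to the shared library, including after the main module mutates it.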
Settings.LINKABLE = 1
if Building.LLVM_OPTS == 2: return self.skip('LLVM LTO will optimize away stuff we expect from the shared library')
lib_src = r'''
#include <stdio.h>
extern int parent_global;
extern "C" void func() {
printf("Parent global: %d.\n", parent_global);
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_func']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = r'''
#include <dlfcn.h>
int parent_global = 123;
int main() {
void* lib_handle;
void (*fptr)();
lib_handle = dlopen("liblib.so", RTLD_NOW);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
parent_global = 456;
fptr();
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.INCLUDE_FULL_LIBRARY = 1
Settings.EXPORTED_FUNCTIONS = ['_main']
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'Parent global: 123.*Parent global: 456.*',
output_nicerizer=lambda x: x.replace('\n', '*'),
post_build=add_pre_run_and_checks,
extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/time.h,libc/langinfo.h'])
Settings.INCLUDE_FULL_LIBRARY = 0
def test_dlfcn_varargs(self):
Settings.LINKABLE = 1
if Building.LLVM_OPTS == 2: return self.skip('LLVM LTO will optimize things that prevent shared objects from working')
if Settings.QUANTUM_SIZE == 1: return self.skip('FIXME: Add support for this')
lib_src = r'''
void print_ints(int n, ...);
extern "C" void func() {
print_ints(2, 13, 42);
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_func']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = r'''
#include <stdarg.h>
#include <stdio.h>
#include <dlfcn.h>
void print_ints(int n, ...) {
va_list args;
va_start(args, n);
for (int i = 0; i < n; i++) {
printf("%d\n", va_arg(args, int));
}
va_end(args);
}
int main() {
void* lib_handle;
void (*fptr)();
print_ints(2, 100, 200);
lib_handle = dlopen("liblib.so", RTLD_NOW);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.EXPORTED_FUNCTIONS = ['_main']
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, '100\n200\n13\n42\n',
post_build=add_pre_run_and_checks)
def test_rand(self):
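# rand() must be reproducible for a given srand() seed, and rand_r() must depend only on the caller-supplied state word.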
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
printf("%d\n", rand());
printf("%d\n", rand());
srand(123);
printf("%d\n", rand());
printf("%d\n", rand());
srand(123);
printf("%d\n", rand());
printf("%d\n", rand());
unsigned state = 0;
int r;
r = rand_r(&state);
printf("%d, %u\n", r, state);
r = rand_r(&state);
printf("%d, %u\n", r, state);
state = 0;
r = rand_r(&state);
printf("%d, %u\n", r, state);
return 0;
}
'''
expected = '''
1250496027
1116302336
440917656
1476150784
440917656
1476150784
12345, 12345
1406932606, 3554416254
12345, 12345
'''
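# Strip the indentation that the triple-quoted literal adds to each expected line.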
self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected))
def test_strtod(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
char* endptr;
printf("\n");
printf("%g\n", strtod("0", &endptr));
printf("%g\n", strtod("0.", &endptr));
printf("%g\n", strtod("0.0", &endptr));
printf("%g\n", strtod("1", &endptr));
printf("%g\n", strtod("1.", &endptr));
printf("%g\n", strtod("1.0", &endptr));
printf("%g\n", strtod("123", &endptr));
printf("%g\n", strtod("123.456", &endptr));
printf("%g\n", strtod("-123.456", &endptr));
printf("%g\n", strtod("1234567891234567890", &endptr));
printf("%g\n", strtod("1234567891234567890e+50", &endptr));
printf("%g\n", strtod("84e+220", &endptr));
printf("%g\n", strtod("123e-50", &endptr));
printf("%g\n", strtod("123e-250", &endptr));
printf("%g\n", strtod("123e-450", &endptr));
char str[] = " 12.34e56end";
printf("%g\n", strtod(str, &endptr));
printf("%d\n", endptr - str);
printf("%g\n", strtod("84e+420", &endptr));
return 0;
}
'''
expected = '''
0
0
0
1
1
1
123
123.456
-123.456
1.23457e+18
1.23457e+68
8.4e+221
1.23e-48
1.23e-248
0
1.234e+57
10
inf
'''
self.do_run(src, re.sub(r'\n\s+', '\n', expected))
def test_strtok(self):
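# strtok_r() keeps its position in a caller-provided pointer, so the outer and inner tokenizations can be interleaved safely.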
src = r'''
#include<stdio.h>
#include<string.h>
int main() {
char test[80], blah[80];
char *sep = "\\/:;=-";
char *word, *phrase, *brkt, *brkb;
strcpy(test, "This;is.a:test:of=the/string\\tokenizer-function.");
for (word = strtok_r(test, sep, &brkt); word; word = strtok_r(NULL, sep, &brkt)) {
strcpy(blah, "blah:blat:blab:blag");
for (phrase = strtok_r(blah, sep, &brkb); phrase; phrase = strtok_r(NULL, sep, &brkb)) {
printf("at %s:%s\n", word, phrase);
}
}
return 1;
}
'''
expected = '''at This:blah
at This:blat
at This:blab
at This:blag
at is.a:blah
at is.a:blat
at is.a:blab
at is.a:blag
at test:blah
at test:blat
at test:blab
at test:blag
at of:blah
at of:blat
at of:blab
at of:blag
at the:blah
at the:blat
at the:blab
at the:blag
at string:blah
at string:blat
at string:blab
at string:blag
at tokenizer:blah
at tokenizer:blat
at tokenizer:blab
at tokenizer:blag
at function.:blah
at function.:blat
at function.:blab
at function.:blag
'''
self.do_run(src, expected)
def test_parseInt(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
if Settings.QUANTUM_SIZE == 1: return self.skip('Q1 and I64_1 do not mix well yet')
src = open(path_from_root('tests', 'parseInt', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'parseInt', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_transtrcase(self):
src = '''
#include <stdio.h>
#include <string.h>
int main() {
char szToupr[] = "hello, ";
char szTolwr[] = "EMSCRIPTEN";
strupr(szToupr);
strlwr(szTolwr);
printf(szToupr);
printf(szTolwr);
return 0;
}
'''
self.do_run(src, 'HELLO, emscripten')
def test_printf(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
self.banned_js_engines = [NODE_JS, V8_ENGINE] # SpiderMonkey and V8 do different things to float64 typed arrays, un-NaNing, etc.
src = open(path_from_root('tests', 'printf', 'test.c'), 'r').read()
expected = [open(path_from_root('tests', 'printf', 'output.txt'), 'r').read(),
open(path_from_root('tests', 'printf', 'output_i64_1.txt'), 'r').read()]
self.do_run(src, expected)
def test_printf_types(self):
src = r'''
#include <stdio.h>
int main() {
char c = '1';
short s = 2;
int i = 3;
long long l = 4;
float f = 5.5;
double d = 6.6;
printf("%c,%hd,%d,%lld,%.1f,%.1llf\n", c, s, i, l, f, d);
return 0;
}
'''
self.do_run(src, '1,2,3,4,5.5,6.6\n')
def test_vprintf(self):
src = r'''
#include <stdio.h>
#include <stdarg.h>
void print(char* format, ...) {
va_list args;
va_start (args, format);
vprintf (format, args);
va_end (args);
}
int main () {
print("Call with %d variable argument.\n", 1);
print("Call with %d variable %s.\n", 2, "arguments");
return 0;
}
'''
expected = '''
Call with 1 variable argument.
Call with 2 variable arguments.
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_atoi(self):
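# atoi() skips leading whitespace, stops at the first non-digit, and returns 0 for input with no leading digits.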
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main () {
printf("%d*", atoi(""));
printf("%d*", atoi("a"));
printf("%d*", atoi(" b"));
printf("%d*", atoi(" c "));
printf("%d*", atoi("6"));
printf("%d*", atoi(" 5"));
printf("%d*", atoi("4 "));
printf("%d*", atoi("3 6"));
printf("%d*", atoi(" 3 7"));
printf("%d*", atoi("9 d"));
printf("%d\n", atoi(" 8 e"));
return 0;
}
'''
self.do_run(src, '0*0*0*0*6*5*4*3*3*9*8')
def test_sscanf(self):
src = r'''
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int main () {
#define CHECK(str) \
{ \
char name[1000]; \
memset(name, 0, 1000); \
int prio = 99; \
sscanf(str, "%s %d", name, &prio); \
printf("%s : %d\n", name, prio); \
}
CHECK("en-us 2");
CHECK("en-r");
CHECK("en 3");
printf("%f, %f\n", atof("1.234567"), atof("cheez"));
float a = -1;
sscanf("-3.03", "%f", &a);
printf("%.4f\n", a);
return 0;
}
'''
self.do_run(src, 'en-us : 2\nen-r : 99\nen : 3\n1.234567, 0.000000\n-3.0300')
# Part 2: doubles
if Settings.USE_TYPED_ARRAYS == 2:
for ftype in ['float', 'double']:
src = r'''
#include <stdio.h>
int main(){
char strval1[] = "1.2345678901";
char strval2[] = "1.23456789e5";
char strval3[] = "1.23456789E5";
char strval4[] = "1.2345678e-5";
char strval5[] = "1.2345678E-5";
double dblval = 1.2345678901;
double tstval;
sscanf(strval1, "%lf", &tstval);
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval2, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval3, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval4, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval5, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
return 0;
}
'''
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789063 123456.789063
Pass: 123456.789063 123456.789063
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
else:
self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
def test_langinfo(self):
src = open(path_from_root('tests', 'langinfo', 'test.c'), 'r').read()
expected = open(path_from_root('tests', 'langinfo', 'output.txt'), 'r').read()
self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/langinfo.h'])
def test_files(self):
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here, to test we don't break FS stuff
Settings.CORRECT_SIGNS = 1 # Just so our output is what we expect. Can flip them both.
post = '''
def process(filename):
src = \'\'\'
var Module = {
'noFSInit': true,
'preRun': function() {
FS.createDataFile('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false); // 200 becomes -56, since signed chars are used in memory
FS.createLazyFile('/', 'test.file', 'test.file', true, false);
var test_files_input = 'hi there!';
var test_files_input_index = 0;
FS.init(function() {
return test_files_input.charCodeAt(test_files_input_index++) || null;
});
}
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
other = open(os.path.join(self.get_dir(), 'test.file'), 'w')
other.write('some data')
other.close()
src = open(path_from_root('tests', 'files.cpp'), 'r').read()
self.do_run(src, 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n',
post_build=post, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_files_m(self):
# Test for Module.stdin etc.
Settings.CORRECT_SIGNS = 1
post = '''
def process(filename):
src = \'\'\'
var data = [10, 20, 40, 30];
var Module = {
stdin: function() { return data.pop() || null },
stdout: function(x) { Module.print('got: ' + x) }
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
src = r'''
#include <stdio.h>
#include <unistd.h>
int main () {
char c;
fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr)));
while ((c = fgetc(stdin)) != EOF) {
putc(c+5, stdout);
}
return 0;
}
'''
self.do_run(src, 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15\n', post_build=post)
def test_folders(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createFolder('/', 'test', true, false);
FS.createPath('/', 'test/hello/world/', true, false);
FS.createPath('/test', 'goodbye/world/', true, false);
FS.createPath('/test/goodbye', 'noentry', false, false);
FS.createDataFile('/test', 'freeforall.ext', 'abc', true, true);
FS.createDataFile('/test', 'restricted.ext', 'def', false, false);
\'\'\'
)
open(filename, 'w').write(src)
'''
src = r'''
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
int main() {
struct dirent *e;
// Basic correct behaviour.
DIR* d = opendir("/test");
printf("--E: %d\n", errno);
while ((e = readdir(d))) puts(e->d_name);
printf("--E: %d\n", errno);
// Empty folder; tell/seek.
puts("****");
d = opendir("/test/hello/world/");
e = readdir(d);
puts(e->d_name);
int pos = telldir(d);
e = readdir(d);
puts(e->d_name);
seekdir(d, pos);
e = readdir(d);
puts(e->d_name);
// Errors.
puts("****");
printf("--E: %d\n", errno);
d = opendir("/test/goodbye/noentry");
printf("--E: %d, D: %d\n", errno, d);
d = opendir("/i/dont/exist");
printf("--E: %d, D: %d\n", errno, d);
d = opendir("/test/freeforall.ext");
printf("--E: %d, D: %d\n", errno, d);
while ((e = readdir(d))) puts(e->d_name);
printf("--E: %d\n", errno);
return 0;
}
'''
expected = '''
--E: 0
.
..
hello
goodbye
freeforall.ext
restricted.ext
--E: 0
****
.
..
..
****
--E: 0
--E: 13, D: 0
--E: 2, D: 0
--E: 20, D: 0
--E: 9
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected), post_build=add_pre_run)
def test_stat(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
var f1 = FS.createFolder('/', 'test', true, true);
var f2 = FS.createDataFile(f1, 'file', 'abcdef', true, true);
var f3 = FS.createLink(f1, 'link', 'file', true, true);
var f4 = FS.createDevice(f1, 'device', function(){}, function(){});
f1.timestamp = f2.timestamp = f3.timestamp = f4.timestamp = new Date(1200000000000);
\'\'\'
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'stat', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'stat', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'test', 'abcdef', true, true);"
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'fcntl', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'fcntl', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl_open(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createDataFile('/', 'test-file', 'abcdef', true, true);
FS.createFolder('/', 'test-folder', true, true);
\'\'\'
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'fcntl-open', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'fcntl-open', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl_misc(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'test', 'abcdef', true, true);"
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'fcntl-misc', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'fcntl-misc', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_poll(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createDataFile('/', 'file', 'abcdef', true, true);
FS.createDevice('/', 'device', function() {}, function() {});
\'\'\'
)
open(filename, 'w').write(src)
'''
src = r'''
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
int main() {
struct pollfd multi[5];
multi[0].fd = open("/file", O_RDONLY, 0777);
multi[1].fd = open("/device", O_RDONLY, 0777);
multi[2].fd = 123;
multi[3].fd = open("/file", O_RDONLY, 0777);
multi[4].fd = open("/file", O_RDONLY, 0777);
multi[0].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[1].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[2].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[3].events = 0x00;
multi[4].events = POLLOUT | POLLNVAL | POLLERR;
printf("ret: %d\n", poll(multi, 5, 123));
printf("errno: %d\n", errno);
printf("multi[0].revents: %d\n", multi[0].revents == (POLLIN | POLLOUT));
printf("multi[1].revents: %d\n", multi[1].revents == (POLLIN | POLLOUT));
printf("multi[2].revents: %d\n", multi[2].revents == POLLNVAL);
printf("multi[3].revents: %d\n", multi[3].revents == 0);
printf("multi[4].revents: %d\n", multi[4].revents == POLLOUT);
return 0;
}
'''
expected = r'''
ret: 4
errno: 0
multi[0].revents: 1
multi[1].revents: 1
multi[2].revents: 1
multi[3].revents: 1
multi[4].revents: 1
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected), post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h,poll.h'])
def test_statvfs(self):
src = r'''
#include <stdio.h>
#include <errno.h>
#include <sys/statvfs.h>
int main() {
struct statvfs s;
printf("result: %d\n", statvfs("/test", &s));
printf("errno: %d\n", errno);
printf("f_bsize: %lu\n", s.f_bsize);
printf("f_frsize: %lu\n", s.f_frsize);
printf("f_blocks: %lu\n", s.f_blocks);
printf("f_bfree: %lu\n", s.f_bfree);
printf("f_bavail: %lu\n", s.f_bavail);
printf("f_files: %lu\n", s.f_files);
printf("f_ffree: %lu\n", s.f_ffree);
printf("f_favail: %lu\n", s.f_favail);
printf("f_fsid: %lu\n", s.f_fsid);
printf("f_flag: %lu\n", s.f_flag);
printf("f_namemax: %lu\n", s.f_namemax);
return 0;
}
'''
expected = r'''
result: 0
errno: 0
f_bsize: 4096
f_frsize: 4096
f_blocks: 1000000
f_bfree: 500000
f_bavail: 500000
f_files: 10
f_ffree: 1000000
f_favail: 1000000
f_fsid: 42
f_flag: 2
f_namemax: 255
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_libgen(self):
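# dirname()/basename() edge cases: trailing slashes, root, empty string, and NULL (the last two both yield ".").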
src = r'''
#include <stdio.h>
#include <libgen.h>
int main() {
char p1[16] = "/usr/lib", p1x[16] = "/usr/lib";
printf("%s -> ", p1);
printf("%s : %s\n", dirname(p1x), basename(p1));
char p2[16] = "/usr", p2x[16] = "/usr";
printf("%s -> ", p2);
printf("%s : %s\n", dirname(p2x), basename(p2));
char p3[16] = "/usr/", p3x[16] = "/usr/";
printf("%s -> ", p3);
printf("%s : %s\n", dirname(p3x), basename(p3));
char p4[16] = "/usr/lib///", p4x[16] = "/usr/lib///";
printf("%s -> ", p4);
printf("%s : %s\n", dirname(p4x), basename(p4));
char p5[16] = "/", p5x[16] = "/";
printf("%s -> ", p5);
printf("%s : %s\n", dirname(p5x), basename(p5));
char p6[16] = "///", p6x[16] = "///";
printf("%s -> ", p6);
printf("%s : %s\n", dirname(p6x), basename(p6));
char p7[16] = "/usr/../lib/..", p7x[16] = "/usr/../lib/..";
printf("%s -> ", p7);
printf("%s : %s\n", dirname(p7x), basename(p7));
char p8[16] = "", p8x[16] = "";
printf("(empty) -> %s : %s\n", dirname(p8x), basename(p8));
printf("(null) -> %s : %s\n", dirname(0), basename(0));
return 0;
}
'''
expected = '''
/usr/lib -> /usr : lib
/usr -> / : usr
/usr/ -> / : usr
/usr/lib/// -> /usr : lib
/ -> / : /
/// -> / : /
/usr/../lib/.. -> /usr/../lib : ..
(empty) -> . : .
(null) -> . : .
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_utime(self):
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
var TEST_F1 = FS.createFolder('/', 'writeable', true, true);
var TEST_F2 = FS.createFolder('/', 'unwriteable', true, false);
\'\'\'
).replace(
'// {{POST_RUN_ADDITIONS}}',
\'\'\'
Module.print('first changed: ' + (TEST_F1.timestamp == 1200000000000));
Module.print('second changed: ' + (TEST_F2.timestamp == 1200000000000));
\'\'\'
)
open(filename, 'w').write(src)
'''
src = r'''
#include <stdio.h>
#include <errno.h>
#include <utime.h>
int main() {
struct utimbuf t = {1000000000, 1200000000};
char* writeable = "/writeable";
char* unwriteable = "/unwriteable";
utime(writeable, &t);
printf("writeable errno: %d\n", errno);
utime(unwriteable, &t);
printf("unwriteable errno: %d\n", errno);
return 0;
}
'''
expected = '''
writeable errno: 0
unwriteable errno: 1
first changed: true
second changed: false
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected), post_build=add_pre_run_and_checks)
def test_direct_string_constant_usage(self):
if self.emcc_args is None: return self.skip('requires libcxx')
src = '''
#include <iostream>
template<int i>
void printText( const char (&text)[ i ] )
{
std::cout << text;
}
int main()
{
printText( "some string constant" );
return 0;
}
'''
self.do_run(src, "some string constant")
def test_istream(self):
if self.emcc_args is None: return self.skip('requires libcxx')
src = '''
#include <string>
#include <sstream>
#include <iostream>
int main()
{
std::string mystring("1 2 3");
std::istringstream is(mystring);
int one, two, three;
is >> one >> two >> three;
printf( "%i %i %i", one, two, three );
}
'''
for linkable in [0, 1]:
Settings.LINKABLE = linkable # regression check for issue #273
self.do_run(src, "1 2 3")
def test_readdir(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createFolder('', 'test', true, true);\\nFS.createLazyFile( 'test', 'some_file', 'http://localhost/some_file', true, false);\\nFS.createFolder('test', 'some_directory', true, true);"
)
open(filename, 'w').write(src)
'''
src = '''
#include <dirent.h>
#include <stdio.h>
int main()
{
DIR * dir;
dirent * entity;
dir = opendir( "test" );
while( ( entity = readdir( dir ) ) )
{
printf( "%s is a %s\\n", entity->d_name, entity->d_type & DT_DIR ? "directory" : "file" );
}
return 0;
}
'''
self.do_run(src, ". is a directory\n.. is a directory\nsome_file is a file\nsome_directory is a directory", post_build=add_pre_run)
def test_fs_base(self):
Settings.INCLUDE_FULL_LIBRARY = 1
try:
addJS = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace('FS.init();', '').replace( # Disable normal initialization, replace with ours
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'filesystem', 'src.js'), 'r').read())
open(filename, 'w').write(src)
'''
src = 'int main() {return 0;}\n'
expected = open(path_from_root('tests', 'filesystem', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=addJS, extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
finally:
Settings.INCLUDE_FULL_LIBRARY = 0
def test_unistd_access(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'access.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'access.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'access.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_curdir(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'curdir.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'curdir.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'curdir.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_close(self):
src = open(path_from_root('tests', 'unistd', 'close.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'close.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_confstr(self):
src = open(path_from_root('tests', 'unistd', 'confstr.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'confstr.out'), 'r').read()
self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/unistd.h'])
def test_unistd_ttyname(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'ttyname.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'ttyname.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'ttyname.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_dup(self):
src = open(path_from_root('tests', 'unistd', 'dup.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'dup.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_pathconf(self):
src = open(path_from_root('tests', 'unistd', 'pathconf.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'pathconf.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_truncate(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'truncate.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'truncate.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'truncate.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_swab(self):
src = open(path_from_root('tests', 'unistd', 'swab.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'swab.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_isatty(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'isatty.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'isatty.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'isatty.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_sysconf(self):
src = open(path_from_root('tests', 'unistd', 'sysconf.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'sysconf.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_login(self):
src = open(path_from_root('tests', 'unistd', 'login.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'login.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_unlink(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'unlink.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'unlink.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'unlink.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_links(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'links.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'links.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'links.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_sleep(self):
src = open(path_from_root('tests', 'unistd', 'sleep.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'sleep.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_io(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'io.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'io.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'io.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_misc(self):
src = open(path_from_root('tests', 'unistd', 'misc.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'misc.out'), 'r').read()
self.do_run(src, expected)
def test_uname(self):
src = r'''
#include <stdio.h>
#include <sys/utsname.h>
int main() {
struct utsname u;
printf("ret: %d\n", uname(&u));
printf("sysname: %s\n", u.sysname);
printf("nodename: %s\n", u.nodename);
printf("release: %s\n", u.release);
printf("version: %s\n", u.version);
printf("machine: %s\n", u.machine);
printf("invalid: %d\n", uname(0));
return 0;
}
'''
expected = '''
ret: 0
sysname: Emscripten
nodename: emscripten
release: 1.0
version: #1
machine: x86-JS
'''
self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected)) # strip the indentation of the triple-quoted expected block
def test_env(self):
src = open(path_from_root('tests', 'env', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'env', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_systypes(self):
src = open(path_from_root('tests', 'systypes', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'systypes', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_getloadavg(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
double load[5] = {42.13, 42.13, 42.13, 42.13, 42.13};
printf("ret: %d\n", getloadavg(load, 5));
printf("load[0]: %.3lf\n", load[0]);
printf("load[1]: %.3lf\n", load[1]);
printf("load[2]: %.3lf\n", load[2]);
printf("load[3]: %.3lf\n", load[3]);
printf("load[4]: %.3lf\n", load[4]);
return 0;
}
'''
expected = '''
ret: 3
load[0]: 0.100
load[1]: 0.100
load[2]: 0.100
load[3]: 42.130
load[4]: 42.130
'''
self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected)) # strip indentation, as in test_uname
def test_inet(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
printf("*%x,%x,%x,%x*\n", htonl(0x12345678), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
return 0;
}
'''
self.do_run(src, '*78563412,cdab,34122143,afbe*')
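# Note: the expected output encodes the byte swap a little-endian host performs:
# htonl(0x12345678) printed with %x gives 78563412, and likewise for the
# 16-bit htons/ntohs values.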
def test_ctype(self):
# The bit fiddling done by the macros using __ctype_b_loc requires this.
Settings.CORRECT_SIGNS = 1
src = open(path_from_root('tests', 'ctype', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'ctype', 'output.txt'), 'r').read()
self.do_run(src, expected)
Settings.CORRECT_SIGNS = 0 # restore the default
def test_atomic(self):
src = '''
#include <stdio.h>
int main() {
int x = 10;
int y = __sync_add_and_fetch(&x, 5);
printf("*%d,%d*\\n", x, y);
x = 10;
y = __sync_fetch_and_add(&x, 5);
printf("*%d,%d*\\n", x, y);
x = 10;
y = __sync_lock_test_and_set(&x, 6);
printf("*%d,%d*\\n", x, y);
x = 10;
y = __sync_bool_compare_and_swap(&x, 9, 7);
printf("*%d,%d*\\n", x, y);
y = __sync_bool_compare_and_swap(&x, 10, 7);
printf("*%d,%d*\\n", x, y);
return 0;
}
'''
self.do_run(src, '*15,15*\n*15,10*\n*6,10*\n*10,0*\n*7,1*')
# libc++ tests
def test_iostream(self):
if Settings.QUANTUM_SIZE == 1: return self.skip("we don't support libcxx in q1")
if self.emcc_args is None:
if Building.LLVM_OPTS: return self.skip('optimizing bitcode before emcc can confuse libcxx inclusion')
self.emcc_args = [] # libc++ auto-inclusion is only done if we use emcc
Settings.SAFE_HEAP = 0 # Some spurious warnings from libc++ internals
src = '''
#include <iostream>
int main()
{
std::cout << "hello world" << std::endl << 77 << "." << std::endl;
return 0;
}
'''
# FIXME: should not have so many newlines in output here
self.do_run(src, 'hello world\n77.\n')
def test_stdvec(self):
src = '''
#include <vector>
#include <stdio.h>
struct S {
int a;
float b;
};
void foo(int a, float b)
{
printf("%d:%.2f\\n", a, b);
}
int main ( int argc, char *argv[] )
{
std::vector<S> ar;
S s;
s.a = 789;
s.b = 123.456f;
ar.push_back(s);
s.a = 0;
s.b = 100.1f;
ar.push_back(s);
foo(ar[0].a, ar[0].b);
foo(ar[1].a, ar[1].b);
}
'''
self.do_run(src, '789:123.46\n0:100.1')
### 'Medium' tests
def test_fannkuch(self):
results = [ (1,0), (2,1), (3,2), (4,4), (5,7), (6,10), (7, 16), (8,22) ]
for i, j in results:
src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read()
self.do_run(src, 'Pfannkuchen(%d) = %d.' % (i,j), [str(i)], no_build=i>1)
def test_raytrace(self):
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('Relies on double value rounding, extremely sensitive')
src = open(path_from_root('tests', 'raytrace.cpp'), 'r').read().replace('double', 'float')
output = open(path_from_root('tests', 'raytrace.ppm'), 'r').read()
self.do_run(src, output, ['3', '16'])#, build_ll_hook=self.do_autodebug)
def test_fasta(self):
results = [ (1,'''GG*ctt**tgagc*'''), (20,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''),
(50,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''') ]
for i, j in results:
src = open(path_from_root('tests', 'fasta.cpp'), 'r').read()
self.do_run(src, j, [str(i)], lambda x: x.replace('\n', '*'), no_build=i>1)
def test_dlmalloc(self):
if self.emcc_args is None: self.emcc_args = [] # dlmalloc auto-inclusion is only done if we use emcc
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ['src.cpp:' + str(i+4) for i in [4816, 4191, 4246, 4199, 4205, 4235, 4227]]
Settings.TOTAL_MEMORY = 100*1024*1024 # needed with typed arrays
src = open(path_from_root('system', 'lib', 'dlmalloc.c'), 'r').read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
self.do_run(src, '*1,0*', ['200', '1'])
self.do_run(src, '*400,0*', ['400', '400'], no_build=True)
# Linked version
src = open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
self.do_run(src, '*1,0*', ['200', '1'], extra_emscripten_args=['-m'])
self.do_run(src, '*400,0*', ['400', '400'], extra_emscripten_args=['-m'], no_build=True)
if self.emcc_args == []: # TODO: do this in other passes too, passing their opts into emcc
# emcc should build in dlmalloc automatically, and do all the sign correction etc. for it
try_delete(os.path.join(self.get_dir(), 'src.cpp.o.js'))
output = Popen(['python', EMCC, path_from_root('tests', 'dlmalloc_test.c'),
'-o', os.path.join(self.get_dir(), 'src.cpp.o.js')], stdout=PIPE, stderr=self.stderr_redirect).communicate()
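# 'x' below is a dummy source string: with no_build=True, do_run reuses the
# src.cpp.o.js that emcc just produced instead of compiling anything.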
self.do_run('x', '*1,0*', ['200', '1'], no_build=True)
self.do_run('x', '*400,0*', ['400', '400'], no_build=True)
# The same for new and all its variants
src = open(path_from_root('tests', 'new.cpp')).read()
for new, delete in [
('malloc(100)', 'free'),
('new char[100]', 'delete[]'),
('new Structy', 'delete'),
('new int', 'delete'),
('new Structy[10]', 'delete[]'),
]:
self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*')
def test_libcxx(self):
self.do_run(open(path_from_root('tests', 'hashtest.cpp')).read(),
'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march')
self.do_run('''
#include <set>
#include <stdio.h>
int main() {
std::set<int> *fetchOriginatorNums = new std::set<int>();
fetchOriginatorNums->insert(171);
printf("hello world\\n");
return 1;
}
''', 'hello world')
def test_static_variable(self):
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # LLVM mixes i64 and i8 in the guard check
src = '''
#include <stdio.h>
struct DATA
{
int value;
DATA()
{
value = 0;
}
};
DATA & GetData()
{
static DATA data;
return data;
}
int main()
{
GetData().value = 10;
printf( "value:%i", GetData().value );
}
'''
self.do_run(src, 'value:10')
def test_mmap(self):
src = '''
#include <stdio.h>
#include <sys/mman.h>
#include <assert.h>
int main(int argc, char *argv[]) {
const int NUM_BYTES = 8 * 1024 * 1024;
const int NUM_INTS = NUM_BYTES / sizeof(int);
int* map = (int*)mmap(0, NUM_BYTES, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANON, -1, 0);
assert(map != MAP_FAILED);
int i;
for (i = 0; i < NUM_INTS; i++) {
map[i] = i;
}
for (i = 0; i < NUM_INTS; i++) {
assert(map[i] == i);
}
assert(munmap(map, NUM_BYTES) == 0);
printf("hello,world");
return 0;
}
'''
self.do_run(src, 'hello,world')
self.do_run(src, 'hello,world', force_c=True)
def test_cubescript(self):
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
Building.COMPILER_TEST_OPTS = [] # remove -g, so we have one test without it by default
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # Has some actual loads of unwritten-to places, in the C++ code...
# Overflows happen in hash loop
Settings.CORRECT_OVERFLOWS = 1
Settings.CHECK_OVERFLOWS = 0
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1
self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp')
def test_gcc_unmangler(self):
self.do_run(path_from_root('third_party'), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'], main_file='gcc_demangler.c')
#### Code snippet that is helpful to search for nonportable optimizations ####
#global LLVM_OPT_OPTS
#for opt in ['-aa-eval', '-adce', '-always-inline', '-argpromotion', '-basicaa', '-basiccg', '-block-placement', '-break-crit-edges', '-codegenprepare', '-constmerge', '-constprop', '-correlated-propagation', '-count-aa', '-dce', '-deadargelim', '-deadtypeelim', '-debug-aa', '-die', '-domfrontier', '-domtree', '-dse', '-extract-blocks', '-functionattrs', '-globaldce', '-globalopt', '-globalsmodref-aa', '-gvn', '-indvars', '-inline', '-insert-edge-profiling', '-insert-optimal-edge-profiling', '-instcombine', '-instcount', '-instnamer', '-internalize', '-intervals', '-ipconstprop', '-ipsccp', '-iv-users', '-jump-threading', '-lazy-value-info', '-lcssa', '-lda', '-libcall-aa', '-licm', '-lint', '-live-values', '-loop-deletion', '-loop-extract', '-loop-extract-single', '-loop-index-split', '-loop-reduce', '-loop-rotate', '-loop-unroll', '-loop-unswitch', '-loops', '-loopsimplify', '-loweratomic', '-lowerinvoke', '-lowersetjmp', '-lowerswitch', '-mem2reg', '-memcpyopt', '-memdep', '-mergefunc', '-mergereturn', '-module-debuginfo', '-no-aa', '-no-profile', '-partial-inliner', '-partialspecialization', '-pointertracking', '-postdomfrontier', '-postdomtree', '-preverify', '-prune-eh', '-reassociate', '-reg2mem', '-regions', '-scalar-evolution', '-scalarrepl', '-sccp', '-scev-aa', '-simplify-libcalls', '-simplify-libcalls-halfpowr', '-simplifycfg', '-sink', '-split-geps', '-sretpromotion', '-strip', '-strip-dead-debug-info', '-strip-dead-prototypes', '-strip-debug-declare', '-strip-nondebug', '-tailcallelim', '-tailduplicate', '-targetdata', '-tbaa']:
# LLVM_OPT_OPTS = [opt]
# try:
# self.do_run(path_from_root(['third_party']), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'], main_file='gcc_demangler.c')
# print opt, "ok"
# except:
# print opt, "FAIL"
def test_lua(self):
try:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: make this work')
# Overflows in luaS_newlstr hash loop
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # Has various warnings, with copied HEAP_HISTORY values (fixed if we copy 'null' as the type)
Settings.CORRECT_OVERFLOWS = 1
Settings.CHECK_OVERFLOWS = 0
Settings.CORRECT_SIGNS = 1 # Not sure why, but needed
Settings.INIT_STACK = 1 # TODO: Investigate why this is necessary
self.do_ll_run(path_from_root('tests', 'lua', 'lua.ll'),
'hello lua world!\n17\n1\n2\n3\n4\n7',
args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''],
output_nicerizer=lambda string: string.replace('\n\n', '\n').replace('\n\n', '\n'),
extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
finally:
del os.environ['EMCC_LEAVE_INPUTS_RAW']
def get_freetype(self):
Settings.INIT_STACK = 1 # TODO: Investigate why this is necessary
return self.get_library('freetype', os.path.join('objs', '.libs', 'libfreetype.a'))
def test_freetype(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: Figure out and try to fix')
if Settings.CORRECT_SIGNS == 0: Settings.CORRECT_SIGNS = 1 # Not sure why, but needed
post = '''
def process(filename):
import tools.shared as shared
# Embed the font into the document
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'font.ttf', %s, true, false);" % str(
map(ord, open(shared.path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'rb').read())
)
)
open(filename, 'w').write(src)
'''
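# FS.createDataFile(parent, name, data, canRead, canWrite) expects the file
# contents as an array of byte values, hence the map(ord, ...) over the raw
# .ttf bytes in the post-build snippet above.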
# Not needed for js, but useful for debugging
shutil.copyfile(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), os.path.join(self.get_dir(), 'font.ttf'))
# Main
self.do_run(open(path_from_root('tests', 'freetype', 'main.c'), 'r').read(),
open(path_from_root('tests', 'freetype', 'ref.txt'), 'r').read(),
['font.ttf', 'test!', '150', '120', '25'],
libraries=self.get_freetype(),
includes=[path_from_root('tests', 'freetype', 'include')],
post_build=post)
#build_ll_hook=self.do_autodebug)
# github issue 324
print '[issue 324]'
self.do_run(open(path_from_root('tests', 'freetype', 'main_2.c'), 'r').read(),
open(path_from_root('tests', 'freetype', 'ref_2.txt'), 'r').read(),
['font.ttf', 'w', '32', '32', '25'],
libraries=self.get_freetype(),
includes=[path_from_root('tests', 'freetype', 'include')],
post_build=post)
print '[issue 324 case 2]'
self.do_run(open(path_from_root('tests', 'freetype', 'main_3.c'), 'r').read(),
open(path_from_root('tests', 'freetype', 'ref_3.txt'), 'r').read(),
['font.ttf', 'W', '32', '32', '0'],
libraries=self.get_freetype(),
includes=[path_from_root('tests', 'freetype', 'include')],
post_build=post)
print '[issue 324 case 3]'
self.do_run('',
open(path_from_root('tests', 'freetype', 'ref_4.txt'), 'r').read(),
['font.ttf', 'ea', '40', '32', '0'],
no_build=True)
def test_sqlite(self):
# gcc -O3 -I/home/alon/Dev/emscripten/tests/sqlite -ldl src.c
if self.emcc_args is None: return self.skip('Very slow without ta2, and we would also need to include dlmalloc manually without emcc')
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO FIXME')
pgo_data = read_pgo_data(path_from_root('tests', 'sqlite', 'sqlite-autooptimize.fails.txt'))
Settings.CORRECT_SIGNS = 1 # XXX: in default, we fail with 2 here, even though the pgo_data should be correct (and works in s_0_0). Investigate this.
Settings.CORRECT_SIGNS_LINES = pgo_data['signs_lines']
Settings.CORRECT_OVERFLOWS = 0
Settings.CORRECT_ROUNDINGS = 0
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # uses time.h to set random bytes, other stuff
Settings.DISABLE_EXCEPTION_CATCHING = 1
Settings.FAST_MEMORY = 4*1024*1024
Settings.EXPORTED_FUNCTIONS = ['_main', '_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free', '_callback']
self.do_run(r'''
#define SQLITE_DISABLE_LFS
#define LONGDOUBLE_TYPE double
#define SQLITE_INT64_TYPE int
#define SQLITE_THREADSAFE 0
''' + open(path_from_root('tests', 'sqlite', 'sqlite3.c'), 'r').read() +
open(path_from_root('tests', 'sqlite', 'benchmark.c'), 'r').read(),
open(path_from_root('tests', 'sqlite', 'benchmark.txt'), 'r').read(),
includes=[path_from_root('tests', 'sqlite')],
force_c=True,
js_engines=[SPIDERMONKEY_ENGINE]) # V8 is slow
def test_zlib(self):
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
Settings.CORRECT_SIGNS = 1
self.do_run(open(path_from_root('tests', 'zlib', 'example.c'), 'r').read(),
open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(),
libraries=self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a']),
includes=[path_from_root('tests', 'zlib')],
force_c=True)
def test_the_bullet(self): # Called thus so it runs late in the alphabetical cycle... it is long
if Building.LLVM_OPTS and self.emcc_args is None: Settings.SAFE_HEAP = 0 # Optimizations make it so we do not have debug info on the line we need to ignore
# Note: this is also a good test of per-file and per-line changes (since we have multiple files, and correct specific lines)
if Settings.SAFE_HEAP:
# Ignore bitfield warnings
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ['btVoronoiSimplexSolver.h:40', 'btVoronoiSimplexSolver.h:41',
'btVoronoiSimplexSolver.h:42', 'btVoronoiSimplexSolver.h:43']
self.do_run(open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(),
[open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings
open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read()],
libraries=self.get_library('bullet', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')],
configure_args=['--disable-demos','--disable-dependency-tracking']),
includes=[path_from_root('tests', 'bullet', 'src')],
js_engines=[SPIDERMONKEY_ENGINE]) # V8 issue 1407
def test_poppler(self):
if self.emcc_args is None: return self.skip('very slow, we only do this in emcc runs')
Settings.CORRECT_OVERFLOWS = 1
Settings.CORRECT_SIGNS = 1
Building.COMPILER_TEST_OPTS += [
'-I' + path_from_root('tests', 'freetype', 'include'),
'-I' + path_from_root('tests', 'poppler', 'include'),
]
Settings.INVOKE_RUN = 0 # We append code that does run() ourselves
# See post(), below
input_file = open(os.path.join(self.get_dir(), 'paper.pdf.js'), 'w')
input_file.write(str(map(ord, open(path_from_root('tests', 'poppler', 'paper.pdf'), 'rb').read())))
input_file.close()
post = '''
def process(filename):
# To avoid loading this large file to memory and altering it, we simply append to the end
src = open(filename, 'a')
src.write(
\'\'\'
FS.createDataFile('/', 'paper.pdf', eval(Module.read('paper.pdf.js')), true, false);
run();
Module.print("Data: " + JSON.stringify(FS.root.contents['filename-1.ppm'].contents.map(function(x) { return unSign(x, 8) })));
\'\'\'
)
src.close()
'''
#fontconfig = self.get_library('fontconfig', [os.path.join('src', '.libs', 'libfontconfig.a')]) # Used in file, but not needed, mostly
freetype = self.get_freetype()
poppler = self.get_library('poppler',
[os.path.join('utils', 'pdftoppm.o'),
os.path.join('utils', 'parseargs.o'),
os.path.join('poppler', '.libs', 'libpoppler.a')],
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--enable-shared=no'])
# Combine libraries
combined = os.path.join(self.get_dir(), 'poppler-combined.bc')
Building.link(poppler + freetype, combined)
self.do_ll_run(combined,
map(ord, open(path_from_root('tests', 'poppler', 'ref.ppm'), 'r').read()).__str__().replace(' ', ''),
args='-scale-to 512 paper.pdf filename'.split(' '),
post_build=post)
#, build_ll_hook=self.do_autodebug)
def test_openjpeg(self):
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1
else:
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["mqc.c:566", "mqc.c:317"]
post = '''
def process(filename):
import tools.shared as shared
original_j2k = shared.path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.j2k')
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'image.j2k', %s, true, false);" % shared.line_splitter(str(
map(ord, open(original_j2k, 'rb').read())
))
).replace(
'// {{POST_RUN_ADDITIONS}}',
"Module.print('Data: ' + JSON.stringify(FS.root.contents['image.raw'].contents));"
)
open(filename, 'w').write(src)
'''
shutil.copy(path_from_root('tests', 'openjpeg', 'opj_config.h'), self.get_dir())
lib = self.get_library('openjpeg',
[os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/getopt.c.o'.split('/')),
os.path.join('bin', self.get_shared_library_name('libopenjpeg.so.1.4.0'))],
configure=['cmake', '.'],
#configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'],
make_args=[]) # no -j 2, since parallel builds can fail
# We use doubles in JS, so we get slightly different values than native code. So instead
# of comparing bytes exactly, we compare the mean pixel value and the mean per-pixel
# difference against the reference image.
def image_compare(output):
# Get the image generated by JS, from the JSON.stringify'd array
m = re.search('\[[\d, -]*\]', output)
try:
js_data = eval(m.group(0))
except AttributeError:
print 'Failed to find proper image output in: ' + output
raise
js_data = map(lambda x: x if x >= 0 else 256+x, js_data) # Our output may be signed, so unsign it
# Get the correct output
true_data = open(path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.raw'), 'rb').read()
# Compare them
assert(len(js_data) == len(true_data))
num = len(js_data)
diff_total = js_total = true_total = 0
for i in range(num):
js_total += js_data[i]
true_total += ord(true_data[i])
diff_total += abs(js_data[i] - ord(true_data[i]))
js_mean = js_total/float(num)
true_mean = true_total/float(num)
diff_mean = diff_total/float(num)
image_mean = 83.265
#print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']'
assert abs(js_mean - image_mean) < 0.01
assert abs(true_mean - image_mean) < 0.01
assert diff_mean < 0.01
return output
self.do_run(open(path_from_root('tests', 'openjpeg', 'codec', 'j2k_to_image.c'), 'r').read(),
'Successfully generated', # The real test for valid output is in image_compare
'-i image.j2k -o image.raw'.split(' '),
libraries=lib,
includes=[path_from_root('tests', 'openjpeg', 'libopenjpeg'),
path_from_root('tests', 'openjpeg', 'codec'),
path_from_root('tests', 'openjpeg', 'common'),
os.path.join(self.get_build_dir(), 'openjpeg')],
force_c=True,
post_build=post,
output_nicerizer=image_compare)#, build_ll_hook=self.do_autodebug)
def test_python(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: make this work')
# Overflows in string_hash
Settings.CORRECT_OVERFLOWS = 1
Settings.CHECK_OVERFLOWS = 0
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # Has bitfields which are false positives. Also the PyFloat_Init tries to detect endianness.
Settings.CORRECT_SIGNS = 1 # Not sure why, but needed
Settings.EXPORTED_FUNCTIONS = ['_main', '_PyRun_SimpleStringFlags'] # for the demo
self.do_ll_run(path_from_root('tests', 'python', 'python.small.bc'),
'hello python world!\n[0, 2, 4, 6]\n5\n22\n5.470000',
args=['-S', '-c' '''print "hello python world!"; print [x*2 for x in range(4)]; t=2; print 10-3-t; print (lambda x: x*2)(11); print '%f' % 5.47'''])
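# Note: '-c' and the triple-quoted program above are adjacent string literals, so
# Python concatenates them into a single '-cprint ...' argument; CPython accepts
# the command fused directly onto -c like this.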
def test_lifetime(self):
if self.emcc_args is None: return self.skip('test relies on emcc opts')
try:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
self.do_ll_run(path_from_root('tests', 'lifetime.ll'), 'hello, world!\n')
if '-O1' in self.emcc_args or '-O2' in self.emcc_args:
assert 'a18' not in open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read(), 'lifetime stuff and their vars must be culled'
else:
assert 'a18' in open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read(), "without opts, it's there"
finally:
del os.environ['EMCC_LEAVE_INPUTS_RAW']
# Test cases in separate files. Note that these files may contain invalid .ll!
# They are only valid enough for us to read for test purposes, not for llvm-as
# to process.
def test_cases(self):
if Building.LLVM_OPTS: return self.skip("Our code is not exactly 'normal' llvm assembly")
try:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
self.banned_js_engines = [NODE_JS] # node issue 1669, exception causes stdout not to be flushed
Settings.CHECK_OVERFLOWS = 0
for name in glob.glob(path_from_root('tests', 'cases', '*.ll')):
shortname = name.replace('.ll', '')
if '' not in shortname: continue # never skips as written; replace '' with a substring to run only matching cases
if '_ta2' in shortname and not Settings.USE_TYPED_ARRAYS == 2:
print self.skip('case "%s" only relevant for ta2' % shortname)
continue
print >> sys.stderr, "Testing case '%s'..." % shortname
output_file = path_from_root('tests', 'cases', shortname + '.txt')
if Settings.QUANTUM_SIZE == 1:
q1_output_file = path_from_root('tests', 'cases', shortname + '_q1.txt')
if os.path.exists(q1_output_file):
output_file = q1_output_file
if os.path.exists(output_file):
output = open(output_file, 'r').read()
else:
output = 'hello, world!'
if output.rstrip() != 'skip':
self.do_ll_run(path_from_root('tests', 'cases', name), output)
# Optional source checking, a python script that gets a global generated with the source
src_checker = path_from_root('tests', 'cases', shortname + '.py')
if os.path.exists(src_checker):
generated = open('src.cpp.o.js').read()
exec(open(src_checker).read())
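# A hypothetical cases/foo.py checker could be as small as:
#   assert 'HEAP8' in generated, 'expected typed-array access in the emitted JS'
# since the exec() above runs it with `generated` bound to the emitted source.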
finally:
del os.environ['EMCC_LEAVE_INPUTS_RAW']
# Autodebug the code
def do_autodebug(self, filename):
output = Popen(['python', AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
assert 'Success.' in output, output
self.prep_ll_run(filename, filename+'.o.ll.ll', force_recompile=True) # rebuild .bc # TODO: use code in do_autodebug_post for this
# Autodebug the code, after LLVM opts. Will only work once!
def do_autodebug_post(self, filename):
if not hasattr(self, 'post'):
print 'Asking for post re-call'
self.post = True
return True
print 'Autodebugging during post time'
delattr(self, 'post')
output = Popen(['python', AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
assert 'Success.' in output, output
shutil.copyfile(filename + '.o.ll.ll', filename + '.o.ll')
Building.llvm_as(filename)
Building.llvm_dis(filename)
def test_autodebug(self):
if Building.LLVM_OPTS: return self.skip('LLVM opts mess us up')
# Run a test that should work, generating some code
self.test_structs()
filename = os.path.join(self.get_dir(), 'src.cpp')
self.do_autodebug(filename)
# Compare to each other, and to expected output
self.do_ll_run(path_from_root('tests', filename+'.o.ll.ll'), '''AD:-1,1''') # filename is absolute, so os.path.join inside path_from_root resolves to it directly
assert open('stdout').read().startswith('AD:-1'), 'We must note when we enter functions'
# Test using build_ll_hook
src = '''
#include <stdio.h>
char cache[256], *next = cache;
int main()
{
cache[10] = 25;
next[20] = 51;
int x = cache[10];
double y = 11.52;
printf("*%d,%d,%.2f*\\n", x, cache[20], y);
return 0;
}
'''
self.do_run(src, '''AD:-1,1''', build_ll_hook=self.do_autodebug)
def test_profiling(self):
src = '''
#include <emscripten.h>
#include <unistd.h>
int main()
{
EMSCRIPTEN_PROFILE_INIT(3);
EMSCRIPTEN_PROFILE_BEGIN(0);
usleep(10 * 1000);
EMSCRIPTEN_PROFILE_END(0);
EMSCRIPTEN_PROFILE_BEGIN(1);
usleep(50 * 1000);
EMSCRIPTEN_PROFILE_END(1);
EMSCRIPTEN_PROFILE_BEGIN(2);
usleep(250 * 1000);
EMSCRIPTEN_PROFILE_END(2);
return 0;
}
'''
post1 = '''
def process(filename):
src = open(filename, 'a')
src.write(\'\'\'
Profiling.dump();
\'\'\')
src.close()
'''
self.do_run(src, '''Profiling data:
Block 0: ''', post_build=post1)
### Integration tests
def test_ccall(self):
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right
src = r'''
#include <stdio.h>
// Optimizations might wipe out our functions without this
#define KEEPALIVE __attribute__((used))
extern "C" {
int KEEPALIVE get_int() { return 5; }
float KEEPALIVE get_float() { return 3.14; }
char * KEEPALIVE get_string() { return "hello world"; }
void KEEPALIVE print_int(int x) { printf("%d\n", x); }
void KEEPALIVE print_float(float x) { printf("%.2f\n", x); }
void KEEPALIVE print_string(char *x) { printf("%s\n", x); }
int KEEPALIVE multi(int x, float y, int z, char *str) { if (x) puts(str); return (x+y)*z; }
int * KEEPALIVE pointer(int *in) { printf("%d\n", *in); static int ret = 21; return &ret; }
}
int main(int argc, char **argv) {
// keep them alive
if (argc == 10) return get_int();
if (argc == 11) return get_float();
if (argc == 12) return get_string()[0];
if (argc == 13) print_int(argv[0][0]);
if (argc == 14) print_float(argv[0][0]);
if (argc == 15) print_string(argv[0]);
if (argc == 16) pointer((int*)argv[0]);
if (argc % 17 == 12) return multi(argc, float(argc)/2, argc+1, argv[0]);
return 0;
}
'''
post = '''
def process(filename):
src = \'\'\'
var Module = {
'postRun': function() {
Module.print('*');
var ret;
ret = Module['ccall']('get_int', 'number'); Module.print([typeof ret, ret]);
ret = ccall('get_float', 'number'); Module.print([typeof ret, ret.toFixed(2)]);
ret = ccall('get_string', 'string'); Module.print([typeof ret, ret]);
ret = ccall('print_int', null, ['number'], [12]); Module.print(typeof ret);
ret = ccall('print_float', null, ['number'], [14.56]); Module.print(typeof ret);
ret = ccall('print_string', null, ['string'], ["cheez"]); Module.print(typeof ret);
ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); Module.print([typeof ret, ret]);
var p = ccall('malloc', 'pointer', ['number'], [4]);
setValue(p, 650, 'i32');
ret = ccall('pointer', 'pointer', ['pointer'], [p]); Module.print([typeof ret, getValue(ret, 'i32')]);
Module.print('*');
// part 2: cwrap
var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']);
Module.print(multi(2, 1.4, 3, 'atr'));
Module.print(multi(8, 5.4, 4, 'bret'));
Module.print('*');
// part 3: avoid stack explosion
for (var i = 0; i < TOTAL_STACK/60; i++) {
ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']);
}
Module.print('stack is ok.');
}
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
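# Parts 1-3 of the script above exercise the two integration APIs: ccall()
# performs a single call into compiled code, while cwrap() returns a reusable
# JS function wrapping the compiled one (see the `multi` wrapper in part 2).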
Settings.EXPORTED_FUNCTIONS = ['_get_int', '_get_float', '_get_string', '_print_int', '_print_float', '_print_string', '_multi', '_pointer', '_malloc']
self.do_run(src, '*\nnumber,5\nnumber,3.14\nstring,hello world\n12\nundefined\n14.56\nundefined\ncheez\nundefined\nmore\nnumber,10\n650\nnumber,21\n*\natr\n10\nbret\n53\n*\nstack is ok.\n', post_build=post)
def test_scriptaclass(self):
header_filename = os.path.join(self.get_dir(), 'header.h')
header = '''
struct ScriptMe {
int value;
ScriptMe(int val);
int getVal(); // XXX Sadly, inlining these will result in LLVM not
// producing any code for them (when just building
// as a library)
void mulVal(int mul);
};
'''
h = open(header_filename, 'w')
h.write(header)
h.close()
src = '''
#include "header.h"
ScriptMe::ScriptMe(int val) : value(val) { }
int ScriptMe::getVal() { return value; }
void ScriptMe::mulVal(int mul) { value *= mul; }
'''
# Way 1: use demangler and namespacer
script_src = '''
var sme = Module._.ScriptMe.__new__(83); // malloc(sizeof(ScriptMe)), ScriptMe::ScriptMe(sme, 83) / new ScriptMe(83) (at addr sme)
Module._.ScriptMe.mulVal(sme, 2); // ScriptMe::mulVal(sme, 2), i.e. sme.mulVal(2)
Module.print('*' + Module._.ScriptMe.getVal(sme) + '*');
_free(sme);
Module.print('*ok*');
'''
post = '''
def process(filename):
Popen(['python', DEMANGLER, filename], stdout=open(filename + '.tmp', 'w')).communicate()
Popen(['python', NAMESPACER, filename, filename + '.tmp'], stdout=open(filename + '.tmp2', 'w')).communicate()
src = open(filename, 'r').read().replace(
'// {{MODULE_ADDITIONS}',
'Module["_"] = ' + open(filename + '.tmp2', 'r').read().replace('var ModuleNames = ', '').rstrip() + ';\n\n' + script_src + '\n\n' +
'// {{MODULE_ADDITIONS}'
)
open(filename, 'w').write(src)
'''
# XXX disable due to possible v8 bug -- self.do_run(src, '*166*\n*ok*', post_build=post)
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right
# Way 2: use CppHeaderParser
Settings.RUNTIME_TYPE_INFO = 1
header = '''
#include <stdio.h>
class Parent {
protected:
int value;
public:
Parent(int val);
int getVal() { return value; }; // inline should work just fine here, unlike Way 1 before
void mulVal(int mul);
};
class Child1 : public Parent {
public:
Child1() : Parent(7) { printf("Child1:%d\\n", value); };
Child1(int val) : Parent(val*2) { value -= 1; printf("Child1:%d\\n", value); };
int getValSqr() { return value*value; }
int getValSqr(int more) { return value*value*more; }
int getValTimes(int times=1) { return value*times; }
};
class Child2 : public Parent {
public:
Child2() : Parent(9) { printf("Child2:%d\\n", value); };
int getValCube() { return value*value*value; }
static void printStatic() { printf("*static*\\n"); }
virtual void virtualFunc() { printf("*virtualf*\\n"); }
virtual void virtualFunc2() { printf("*virtualf2*\\n"); }
static void runVirtualFunc(Child2 *self) { self->virtualFunc(); };
private:
void doSomethingSecret() { printf("security breached!\\n"); }; // we should not be able to do this
};
'''
open(header_filename, 'w').write(header)
basename = os.path.join(self.get_dir(), 'bindingtest')
output = Popen(['python', BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
#print output
assert 'Traceback' not in output, 'Failure in binding generation: ' + output
src = '''
#include "header.h"
Parent::Parent(int val) : value(val) { printf("Parent:%d\\n", val); }
void Parent::mulVal(int mul) { value *= mul; }
#include "bindingtest.cpp"
'''
post2 = '''
def process(filename):
src = open(filename, 'a')
src.write(open('bindingtest.js').read() + '\\n\\n')
src.close()
'''
post3 = '''
def process(filename):
script_src_2 = \'\'\'
var sme = new Module.Parent(42);
sme.mulVal(2);
Module.print('*')
Module.print(sme.getVal());
Module.print('c1');
var c1 = new Module.Child1();
Module.print(c1.getVal());
c1.mulVal(2);
Module.print(c1.getVal());
Module.print(c1.getValSqr());
Module.print(c1.getValSqr(3));
Module.print(c1.getValTimes()); // default argument should be 1
Module.print(c1.getValTimes(2));
Module.print('c1 v2');
c1 = new Module.Child1(8); // now with a parameter, we should handle the overloading automatically and properly and use constructor #2
Module.print(c1.getVal());
c1.mulVal(2);
Module.print(c1.getVal());
Module.print(c1.getValSqr());
Module.print(c1.getValSqr(3));
Module.print('c2')
var c2 = new Module.Child2();
Module.print(c2.getVal());
c2.mulVal(2);
Module.print(c2.getVal());
Module.print(c2.getValCube());
var succeeded;
try {
succeeded = 0;
Module.print(c2.doSomethingSecret()); // should fail since private
succeeded = 1;
} catch(e) {}
Module.print(succeeded);
try {
succeeded = 0;
Module.print(c2.getValSqr()); // function from the other class
succeeded = 1;
} catch(e) {}
Module.print(succeeded);
try {
succeeded = 0;
c2.getValCube(); // sanity
succeeded = 1;
} catch(e) {}
Module.print(succeeded);
Module.Child2.prototype.printStatic(); // static calls go through the prototype
// virtual function
c2.virtualFunc();
Module.Child2.prototype.runVirtualFunc(c2);
c2.virtualFunc2();
// extend the class from JS
var c3 = new Module.Child2;
Module.customizeVTable(c3, [{
original: Module.Child2.prototype.virtualFunc,
replacement: function() {
Module.print('*js virtualf replacement*');
}
}, {
original: Module.Child2.prototype.virtualFunc2,
replacement: function() {
Module.print('*js virtualf2 replacement*');
}
}]);
c3.virtualFunc();
Module.Child2.prototype.runVirtualFunc(c3);
c3.virtualFunc2();
c2.virtualFunc(); // original should remain the same
Module.Child2.prototype.runVirtualFunc(c2);
c2.virtualFunc2();
Module.print('*ok*');
\'\'\'
src = open(filename, 'a')
src.write(script_src_2 + '\\n')
src.close()
'''
self.do_run(src, '''*
84
c1
Parent:7
Child1:7
7
14
196
588
14
28
c1 v2
Parent:16
Child1:15
15
30
900
2700
c2
Parent:9
Child2:9
9
18
5832
0
0
1
*static*
*virtualf*
*virtualf*
*virtualf2*
Parent:9
Child2:9
*js virtualf replacement*
*js virtualf replacement*
*js virtualf2 replacement*
*virtualf*
*virtualf*
*virtualf2*
*ok*
''', post_build=[post2, post3])
def test_scriptaclass_2(self):
header_filename = os.path.join(self.get_dir(), 'header.h')
header = '''
#include <stdio.h>
#include <string.h>
class StringUser {
char *s;
int i;
public:
StringUser(char *string, int integer) : s(strdup(string)), i(integer) {}
void Print(int anotherInteger, char *anotherString) {
printf("|%s|%d|%s|%d|\\n", s, i, anotherString, anotherInteger);
}
void CallOther(StringUser *fr) { fr->Print(i, s); }
};
'''
open(header_filename, 'w').write(header)
basename = os.path.join(self.get_dir(), 'bindingtest')
output = Popen(['python', BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
#print output
assert 'Traceback' not in output, 'Failure in binding generation: ' + output
src = '''
#include "header.h"
#include "bindingtest.cpp"
'''
post = '''
def process(filename):
src = open(filename, 'a')
src.write(open('bindingtest.js').read() + '\\n\\n')
src.write(\'\'\'
var user = new Module.StringUser("hello", 43);
user.Print(41, "world");
\'\'\')
src.close()
'''
self.do_run(src, '|hello|43|world|41|', post_build=post)
def test_typeinfo(self):
if self.emcc_args is not None and self.emcc_args != []: return self.skip('full LLVM opts optimize out all the code that uses the type')
Settings.RUNTIME_TYPE_INFO = 1
if Settings.QUANTUM_SIZE != 4: return self.skip('We assume normal sizes in the output here')
src = '''
#include<stdio.h>
struct UserStruct {
int x;
char y;
short z;
};
struct Encloser {
short x;
UserStruct us;
int y;
};
int main() {
Encloser e;
e.us.y = 5;
printf("*ok:%d*\\n", e.us.y);
return 0;
}
'''
post = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{POST_RUN_ADDITIONS}}',
\'\'\'
if (Runtime.typeInfo) {
Module.print('|' + Runtime.typeInfo.UserStruct.fields + '|' + Runtime.typeInfo.UserStruct.flatIndexes + '|');
var t = Runtime.generateStructInfo(['x', { us: ['x', 'y', 'z'] }, 'y'], 'Encloser')
Module.print('|' + [t.x, t.us.x, t.us.y, t.us.z, t.y] + '|');
Module.print('|' + JSON.stringify(Runtime.generateStructInfo(['x', 'y', 'z'], 'UserStruct')) + '|');
} else {
Module.print('No type info.');
}
\'\'\'
)
open(filename, 'w').write(src)
'''
self.do_run(src,
'*ok:5*\n|i32,i8,i16|0,4,6|\n|0,4,8,10,12|\n|{"__size__":8,"x":0,"y":4,"z":6}|',
post_build=post)
# Make sure that without the setting, we don't spam the .js with the type info
Settings.RUNTIME_TYPE_INFO = 0
self.do_run(src, 'No type info.', post_build=post)
### Tests for tools
def test_safe_heap(self):
if not Settings.SAFE_HEAP: return self.skip('We need SAFE_HEAP to test SAFE_HEAP')
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('It is ok to violate the load-store assumption with TA2')
if Building.LLVM_OPTS: return self.skip('LLVM can optimize away the intermediate |x|')
src = '''
#include<stdio.h>
int main() {
int *x = new int;
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
printf("*ok*\\n");
return 0;
}
'''
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And we should not fail if we disable checking on that line
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ["src.cpp:7"]
self.do_run(src, '*ok*')
# But if we disable the wrong lines, we still fail
Settings.SAFE_HEAP_LINES = ["src.cpp:99"]
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And reverse the checks with = 2
Settings.SAFE_HEAP = 2
Settings.SAFE_HEAP_LINES = ["src.cpp:99"]
self.do_run(src, '*ok*')
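# Recap of the modes exercised above: SAFE_HEAP=1 checks every access,
# SAFE_HEAP=3 checks everywhere *except* the lines in SAFE_HEAP_LINES, and
# SAFE_HEAP=2 reverses that, checking *only* the listed lines.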
Settings.SAFE_HEAP = 1
# Linking multiple files should work too
module = '''
#include<stdio.h>
void callFunc() {
int *x = new int;
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
}
'''
module_name = os.path.join(self.get_dir(), 'module.cpp')
open(module_name, 'w').write(module)
main = '''
#include<stdio.h>
extern void callFunc();
int main() {
callFunc();
int *x = new int;
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
printf("*ok*\\n");
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.cpp')
open(main_name, 'w').write(main)
Building.emcc(module_name, ['-g'])
Building.emcc(main_name, ['-g'])
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([module_name + '.o', main_name + '.o'], all_name)
try:
self.do_ll_run(all_name, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And we should not fail if we disable checking on those lines
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ["module.cpp:7", "main.cpp:9"]
self.do_ll_run(all_name, '*ok*')
# But we will fail if we do not disable exactly what we need to - any mistake leads to error
for lines in [["module.cpp:22", "main.cpp:9"], ["module.cpp:7", "main.cpp:29"], ["module.cpp:127", "main.cpp:449"], ["module.cpp:7"], ["main.cpp:9"]]:
Settings.SAFE_HEAP_LINES = lines
try:
self.do_ll_run(all_name, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
def test_check_overflow(self):
Settings.CHECK_OVERFLOWS = 1
Settings.CORRECT_OVERFLOWS = 0
src = '''
#include<stdio.h>
int main() {
int t = 77;
for (int i = 0; i < 30; i++) {
//t = (t << 2) + t + 1; // This would have worked, since << forces into 32-bit int...
t = t*5 + 1; // Python lookdict_string has ~the above line, which turns into this one with optimizations...
printf("%d,%d\\n", t, t & 127);
}
return 0;
}
'''
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Too many corrections' in str(e), str(e)
def test_debug(self):
src = '''
#include <stdio.h>
#include <assert.h>
void checker(int x) {
x += 20;
assert(x < 15); // this is line 7!
}
int main() {
checker(10);
return 0;
}
'''
try:
post = r'''
def process(filename):
lines = open(filename, 'r').readlines()
lines = filter(lambda line: '___assert_fail(' in line or '___assert_func(' in line, lines)
found_line_num = any(('//@line 7 "' in line) for line in lines)
found_filename = any(('src.cpp"\n' in line) for line in lines)
assert found_line_num, 'Must have debug info with the line number'
assert found_filename, 'Must have debug info with the filename'
'''
self.do_run(src, '*nothingatall*', post_build=post)
except Exception, e:
# This test *should* fail
assert 'Assertion failed' in str(e), str(e)
def test_linespecific(self):
if self.emcc_args: self.emcc_args += ['--llvm-opts', '0'] # llvm full opts make the expected failures here not happen
Settings.CHECK_SIGNS = 0
Settings.CHECK_OVERFLOWS = 0
# Signs
src = '''
#include <stdio.h>
#include <assert.h>
int main()
{
int varey = 100;
unsigned int MAXEY = -1;
printf("*%d*\\n", varey >= MAXEY); // 100 >= -1? not in unsigned!
}
'''
Settings.CORRECT_SIGNS = 0
self.do_run(src, '*1*') # This is a fail - we expect 0
Settings.CORRECT_SIGNS = 1
self.do_run(src, '*0*') # Now it will work properly
# And now let's fix just that one line
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["src.cpp:9"]
self.do_run(src, '*0*')
# Fixing the wrong line should not work
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["src.cpp:3"]
self.do_run(src, '*1*')
# And reverse the checks with = 3
Settings.CORRECT_SIGNS = 3
Settings.CORRECT_SIGNS_LINES = ["src.cpp:3"]
self.do_run(src, '*0*')
Settings.CORRECT_SIGNS = 3
Settings.CORRECT_SIGNS_LINES = ["src.cpp:9"]
self.do_run(src, '*1*')
Settings.CORRECT_SIGNS = 0
# Overflows
src = '''
#include<stdio.h>
int main() {
int t = 77;
for (int i = 0; i < 30; i++) {
t = t*5 + 1;
}
printf("*%d,%d*\\n", t, t & 127);
return 0;
}
'''
correct = '*186854335,63*'
Settings.CORRECT_OVERFLOWS = 0
try:
self.do_run(src, correct)
raise Exception('UNEXPECTED-PASS')
except Exception, e:
assert 'UNEXPECTED' not in str(e), str(e)
assert 'Expected to find' in str(e), str(e)
Settings.CORRECT_OVERFLOWS = 1
self.do_run(src, correct) # Now it will work properly
# And now let's fix just that one line
Settings.CORRECT_OVERFLOWS = 2
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:6"]
self.do_run(src, correct)
# Fixing the wrong line should not work
Settings.CORRECT_OVERFLOWS = 2
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:3"]
try:
self.do_run(src, correct)
raise Exception('UNEXPECTED-PASS')
except Exception, e:
assert 'UNEXPECTED' not in str(e), str(e)
assert 'Expected to find' in str(e), str(e)
# And reverse the checks with = 3
Settings.CORRECT_OVERFLOWS = 3
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:3"]
self.do_run(src, correct)
Settings.CORRECT_OVERFLOWS = 3
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:6"]
try:
self.do_run(src, correct)
raise Exception('UNEXPECTED-PASS')
except Exception, e:
assert 'UNEXPECTED' not in str(e), str(e)
assert 'Expected to find' in str(e), str(e)
Settings.CORRECT_OVERFLOWS = 0
# Roundings
src = '''
#include <stdio.h>
#include <assert.h>
int main()
{
TYPE x = -5;
printf("*%d*", x/2);
x = 5;
printf("*%d*", x/2);
float y = -5.33;
x = y;
printf("*%d*", x);
y = 5.33;
x = y;
printf("*%d*", x);
printf("\\n");
}
'''
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 0
self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-6**5*') # JS floor operations, always to the negative. This is an undetected error here!
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # We get these right, since they are 32-bit and we can shortcut using the |0 trick
self.do_run(src.replace('TYPE', 'unsigned int'), '*-3**2**-6**5*') # We fail, since no fast shortcut for 32-bit unsigneds
Settings.CORRECT_ROUNDINGS = 1
Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*') # Correct
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Correct
self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*') # Correct
Settings.CORRECT_SIGNS = 0
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 2
Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:13"] # Fix just the last mistake
self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-5**5*')
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Here we are lucky and also get the first one right
self.do_run(src.replace('TYPE', 'unsigned int'), '*-3**2**-5**5*') # No such luck here
# And reverse the check with = 3
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 3
Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:999"]
self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*')
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*')
Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*')
Settings.CORRECT_SIGNS = 0
def test_pgo(self):
Settings.PGO = Settings.CHECK_OVERFLOWS = Settings.CORRECT_OVERFLOWS = Settings.CHECK_SIGNS = Settings.CORRECT_SIGNS = 1
src = '''
#include<stdio.h>
int main() {
int t = 77;
for (int i = 0; i < 30; i++) {
t = t*5 + 1;
}
printf("*%d,%d*\\n", t, t & 127);
int varey = 100;
unsigned int MAXEY = -1;
for (int j = 0; j < 2; j++) {
printf("*%d*\\n", varey >= MAXEY); // 100 >= -1? not in unsigned!
MAXEY = 1; // So we succeed the second time around
}
return 0;
}
'''
def check(output):
# TODO: check the line #
if self.emcc_args is None or self.emcc_args == []: # LLVM full opts optimize out some corrections
assert 'Overflow|src.cpp:6 : 60 hits, %20 failures' in output, 'no indication of Overflow corrections: ' + output
assert 'UnSign|src.cpp:13 : 6 hits, %17 failures' in output, 'no indication of Sign corrections: ' + output
return output
self.do_run(src, '*186854335,63*\n', output_nicerizer=check)
Settings.PGO = Settings.CHECK_OVERFLOWS = Settings.CORRECT_OVERFLOWS = Settings.CHECK_SIGNS = Settings.CORRECT_SIGNS = 0
# Now, recompile with the PGO data, and it should work
pgo_data = read_pgo_data(self.get_stdout_path())
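# read_pgo_data() parses the run's stdout into a dict; the keys used here
# ('signs_lines', 'overflows_lines') hold the file:line lists that the
# CORRECT_*_LINES settings expect.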
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = pgo_data['signs_lines']
Settings.CORRECT_OVERFLOWS = 2
Settings.CORRECT_OVERFLOWS_LINES = pgo_data['overflows_lines']
self.do_run(src, '*186854335,63*\n')
# Sanity check: Without PGO, we will fail
try:
self.do_run(src, '*186854335,63*\n')
except:
pass
def test_exit_status(self):
Settings.CATCH_EXIT_CODE = 1
src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanup() {
printf("cleanup\n");
}
int main()
{
atexit(cleanup); // this atexit should still be called
printf("hello, world!\n");
exit(118); // Unusual exit status to make sure it's working!
}
'''
self.do_run(src, 'hello, world!\ncleanup\nExit Status: 118')
# Generate tests for everything
def make_run(fullname, name=-1, compiler=-1, llvm_opts=0, embetter=0, quantum_size=0, typed_arrays=0, emcc_args=None):
exec('''
class %s(T):
def tearDown(self):
super(%s, self).tearDown()
def setUp(self):
super(%s, self).setUp()
Building.COMPILER_TEST_OPTS = ['-g']
os.chdir(self.get_dir()) # Ensure the directory exists and go there
Building.COMPILER = %r
self.emcc_args = %s
if self.emcc_args is not None:
Settings.load(self.emcc_args)
Building.LLVM_OPTS = 0
return
llvm_opts = %d # 1 is yes, 2 is yes and unsafe
embetter = %d
quantum_size = %d
# TODO: Move much of this to an init() function in shared.py, and reuse it here
Settings.USE_TYPED_ARRAYS = %d
Settings.INVOKE_RUN = 1
Settings.RELOOP = 0 # we only do them in the "o2" pass
Settings.MICRO_OPTS = embetter
Settings.QUANTUM_SIZE = quantum_size
Settings.ASSERTIONS = 1-embetter
Settings.SAFE_HEAP = 1-(embetter and llvm_opts)
Building.LLVM_OPTS = llvm_opts
Settings.PGO = 0
Settings.CHECK_OVERFLOWS = 1-(embetter or llvm_opts)
Settings.CORRECT_OVERFLOWS = 1-(embetter and llvm_opts)
Settings.CORRECT_SIGNS = 0
Settings.CORRECT_ROUNDINGS = 0
Settings.CORRECT_OVERFLOWS_LINES = Settings.CORRECT_SIGNS_LINES = Settings.CORRECT_ROUNDINGS_LINES = Settings.SAFE_HEAP_LINES = []
Settings.CHECK_SIGNS = 0 #1-(embetter or llvm_opts)
Settings.INIT_STACK = 0
Settings.RUNTIME_TYPE_INFO = 0
Settings.DISABLE_EXCEPTION_CATCHING = 0
Settings.PROFILE = 0
Settings.INCLUDE_FULL_LIBRARY = 0
Settings.BUILD_AS_SHARED_LIB = 0
Settings.RUNTIME_LINKED_LIBS = []
Settings.CATCH_EXIT_CODE = 0
Settings.EMULATE_UNALIGNED_ACCESSES = int(Settings.USE_TYPED_ARRAYS == 2 and Building.LLVM_OPTS == 2)
Settings.DOUBLE_MODE = 1 if Settings.USE_TYPED_ARRAYS and Building.LLVM_OPTS == 0 else 0
Settings.PRECISE_I64_MATH = 0
Building.pick_llvm_opts(3)
TT = %s
''' % (fullname, fullname, fullname, compiler, str(emcc_args), llvm_opts, embetter, quantum_size, typed_arrays, fullname))
return TT
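# For example, the 'o1' run below is built as:
#   o1 = make_run("o1", compiler=CLANG, emcc_args=["-O1", "-s", "SAFE_HEAP=1"])
# which defines a test class named o1 whose setUp() applies those flags.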
# Make one run with the defaults
exec('default = make_run("default", compiler=CLANG, emcc_args=[])')
# Make one run with -O1, with safe heap
exec('o1 = make_run("o1", compiler=CLANG, emcc_args=["-O1", "-s", "SAFE_HEAP=1"])')
# Make one run with -O2, but without closure (we enable closure in specific tests, otherwise on everything it is too slow)
exec('o2 = make_run("o2", compiler=CLANG, emcc_args=["-O2", "--closure", "0"])')
# Make custom runs with various options
for compiler, quantum, embetter, typed_arrays, llvm_opts in [
(CLANG, 1, 1, 0, 0),
(CLANG, 1, 1, 1, 1),
(CLANG, 4, 0, 0, 0),
(CLANG, 4, 0, 0, 1),
(CLANG, 4, 1, 1, 0),
(CLANG, 4, 1, 1, 1),
]:
fullname = 's_%d_%d%s%s' % (
llvm_opts, embetter, '' if quantum == 4 else '_q' + str(quantum), '' if typed_arrays in [0, 1] else '_t' + str(typed_arrays)
)
exec('%s = make_run(fullname, %r,%r,%d,%d,%d,%d)' % (fullname, fullname, compiler, llvm_opts, embetter, quantum, typed_arrays))
del T # T is just a shape for the specific subclasses, we don't test it itself
class other(RunnerCore):
def test_emcc(self):
for compiler in [EMCC, EMXX]:
shortcompiler = os.path.basename(compiler)
suffix = '.c' if compiler == EMCC else '.cpp'
# --version
output = Popen(['python', compiler, '--version'], stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''emcc (Emscripten GCC-like replacement) 2.0
Copyright (C) 2011 the Emscripten authors.
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''', output[0].replace('\r', ''), output[1].replace('\r', ''))
# --help
output = Popen(['python', compiler, '--help'], stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''%s [options] file...
Most normal gcc/g++ options will work, for example:
--help Display this information
--version Display compiler version information
Options that are modified or new in %s include:
-O0 No optimizations (default)
''' % (shortcompiler, shortcompiler), output[0].replace('\r', ''), output[1].replace('\r', ''))
# emcc src.cpp ==> writes a.out.js
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix)], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
# properly report source code errors, and stop there
self.clear()
assert not os.path.exists('a.out.js')
output = Popen(['python', compiler, path_from_root('tests', 'hello_world_error' + suffix)], stdout=PIPE, stderr=PIPE).communicate()
assert not os.path.exists('a.out.js'), 'compilation failed, so no output file is expected'
assert len(output[0]) == 0, output[0]
self.assertNotContained('IOError', output[1]) # no python stack
self.assertNotContained('Traceback', output[1]) # no python stack
self.assertContained('error: invalid preprocessing directive', output[1])
self.assertContained("error: use of undeclared identifier 'cheez", output[1])
self.assertContained('2 errors generated', output[1])
assert output[1].split('2 errors generated.')[1].replace('\r', '').replace('\n', '') == 'emcc: compiler frontend failed to generate LLVM bitcode, halting'
# emcc src.cpp -c and emcc src.cpp -o src.[o|bc] ==> should give a .bc file
# regression check: -o js should create "js", with bitcode content
for args in [['-c'], ['-o', 'src.o'], ['-o', 'src.bc'], ['-o', 'src.so'], ['-o', 'js']]:
target = args[1] if len(args) == 2 else 'hello_world.o'
self.clear()
Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix)] + args, stdout=PIPE, stderr=PIPE).communicate()
syms = Building.llvm_nm(target)
assert len(syms.defs) == 1 and 'main' in syms.defs, 'Failed to generate valid bitcode'
if target == 'js': # make sure emcc can recognize the target as a bitcode file
shutil.move(target, target + '.bc')
target += '.bc'
output = Popen(['python', compiler, target, '-o', target + '.js'], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists(target + '.js'), 'Expected %s to exist since args are %s : %s' % (target + '.js', str(args), '\n'.join(output))
self.assertContained('hello, world!', run_js(target + '.js'))
# handle singleton archives
self.clear()
Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix), '-o', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
Popen([LLVM_AR, 'r', 'a.a', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('a.a')
output = Popen(['python', compiler, 'a.a']).communicate()
assert os.path.exists('a.out.js'), output
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc src.ll ==> generates .js
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'hello_world.ll')], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc [..] -o [path] ==> should work with absolute paths
try:
os.mkdir('a_dir')
os.chdir('a_dir')
os.mkdir('b_dir')
for path in [os.path.abspath(os.path.join('..', 'file1.js')), os.path.join('b_dir', 'file2.js')]:
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'hello_world.ll'), '-o', path], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(path), path + ' does not exist; ' + '\n'.join(output)
self.assertContained('hello, world!', run_js(path))
finally:
os.chdir(self.get_dir())
try:
shutil.rmtree('a_dir')
except:
pass
# dlmalloc. dlmalloc is special in that it is the only part of libc that is (1) hard to write well, and
# (2) very speed-sensitive. So we do not implement it in JS in library.js; instead we compile it from source
for source, has_malloc in [('hello_world' + suffix, False), ('hello_malloc.cpp', True)]:
self.clear()
output = Popen(['python', compiler, path_from_root('tests', source)], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
generated = open('a.out.js').read()
assert ('function _malloc(bytes) {' in generated) == (not has_malloc), 'If malloc is needed, it should be there; if not, it should not be'
# Optimization: emcc src.cpp -o something.js [-Ox]. -O0 is the same as not specifying any optimization setting
for params, opt_level, bc_params, closure, has_malloc in [ # bc params are used after compiling to bitcode
(['-o', 'something.js'], 0, None, 0, 1),
(['-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-o', 'something.js', '-O1', '--closure', '1'], 1, None, 1, 0),
(['-o', 'something.js', '-O2'], 2, None, 1, 1),
(['-o', 'something.js', '-O2', '--closure', '0'], 2, None, 0, 0),
(['-o', 'something.js', '-O3'], 3, None, 1, 1),
(['-o', 'something.js', '-O3', '--closure', '0'], 3, None, 0, 0),
# and, test compiling to bitcode first
(['-o', 'something.bc'], 0, [], 0, 0),
(['-o', 'something.bc'], 0, ['-O0'], 0, 0),
(['-o', 'something.bc'], 1, ['-O1'], 0, 0),
(['-o', 'something.bc'], 2, ['-O2'], 1, 0),
(['-o', 'something.bc'], 3, ['-O3'], 1, 0),
(['-O1', '-o', 'something.bc'], 0, [], 0, 0), # -Ox is ignored and warned about
]:
#print params, opt_level, bc_params, closure
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'hello_world_loop' + ('_malloc' if has_malloc else '') + '.cpp')] + params,
stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
if bc_params is not None:
if '-O1' in params and 'something.bc' in params:
assert 'warning: -Ox flags ignored, since not generating JavaScript' in output[1]
assert os.path.exists('something.bc'), output[1]
output = Popen(['python', compiler, 'something.bc', '-o', 'something.js'] + bc_params, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('something.js'), output[1]
assert ('Warning: Applying some potentially unsafe optimizations!' in output[1]) == (opt_level >= 3), 'unsafe warning should appear in opt >= 3'
self.assertContained('hello, world!', run_js('something.js'))
# Verify optimization level etc. in the generated code
# XXX these are quite sensitive, and will need updating when code generation changes
generated = open('something.js').read() # TODO: parse out the _main function itself, not support code, if the tests below need that some day
assert 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 should be used by default'
assert 'SAFE_HEAP' not in generated, 'safe heap should not be used by default'
assert ': while(' not in generated, 'when relooping we also js-optimize, so there should be no labelled whiles'
if closure:
assert 'Module._main=' in generated, 'closure compiler should have been run (and output should be minified)'
else:
# closure has not been run, we can do some additional checks. TODO: figure out how to do these even with closure
assert 'Module._main = ' not in generated, 'closure compiler should not have been run'
# XXX find a way to test this: assert ('& 255' in generated or '&255' in generated) == (opt_level <= 2), 'corrections should be in opt <= 2'
assert ('(__label__)' in generated) == (opt_level <= 1), 'relooping should be in opt >= 2'
assert ('assert(STACKTOP < STACK_MAX' in generated) == (opt_level == 0), 'assertions should be in opt == 0'
assert 'var $i;' in generated or 'var $i_01;' in generated or 'var $storemerge3;' in generated or 'var $storemerge4;' in generated or 'var $i_04;' in generated, 'micro opts should always be on'
if opt_level >= 1:
assert 'HEAP8[HEAP32[' in generated or 'HEAP8[$vla1 + (($storemerge4 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$vla1 + (($i_04 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$vla1 + ($i_04 / 2 & -1)]' in generated or 'HEAP8[$1 + (($i_01 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$1 + ($i_01 / 2 & -1)]' in generated, 'eliminator should create compound expressions, and fewer one-time vars'
assert ('_puts(' in generated) == (opt_level >= 1), 'with opt >= 1, llvm opts are run and they should optimize printf to puts'
assert ('function _malloc(bytes) {' in generated) == (not has_malloc), 'If malloc is needed, it should be there; if not, it should not be'
assert 'function _main() {' in generated, 'Should be unminified, including whitespace'
assert 'function _dump' in generated, 'No inlining by default'
# emcc -s RELOOP=1 src.cpp ==> should pass -s to emscripten.py. --typed-arrays is a convenient alias for -s USE_TYPED_ARRAYS
for params, test, text in [
(['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
(['-O1', '-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' not in generated, 'inlining'),
(['-s', 'USE_TYPED_ARRAYS=0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['-s', 'USE_TYPED_ARRAYS=1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
(['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"]' in generated, 'dump is now exported'),
(['--typed-arrays', '0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['--typed-arrays', '1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
(['--typed-arrays', '2'], lambda generated: 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 selected'),
(['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
]:
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js'] + params, stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
assert test(open('a.out.js').read()), text
# Compiling two source files into a final JS.
for args, target in [([], 'a.out.js'), (['-o', 'combined.js'], 'combined.js')]:
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp')] + args,
stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists(target), '\n'.join(output)
self.assertContained('side got: hello from main, over', run_js(target))
# Compiling two files with -c will generate separate .bc files
self.clear()
output = Popen(['python', compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp'), '-c'] + args,
stdout=PIPE, stderr=PIPE).communicate()
if '-o' in args:
# specifying -o and -c is an error
assert 'fatal error' in output[1], output[1]
continue
assert os.path.exists('twopart_main.o'), '\n'.join(output)
assert os.path.exists('twopart_side.o'), '\n'.join(output)
assert not os.path.exists(target), 'We should only have created bitcode here: ' + '\n'.join(output)
# Compiling one of them alone is expected to fail
output = Popen(['python', compiler, 'twopart_main.o'] + args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(target), '\n'.join(output)
#print '\n'.join(output)
self.assertContained('is not a function', run_js(target, stderr=STDOUT))
try_delete(target)
# Combining those bc files into js should work
output = Popen(['python', compiler, 'twopart_main.o', 'twopart_side.o'] + args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(target), '\n'.join(output)
self.assertContained('side got: hello from main, over', run_js(target))
# Combining bc files into another bc should also work
try_delete(target)
assert not os.path.exists(target)
output = Popen(['python', compiler, 'twopart_main.o', 'twopart_side.o', '-o', 'combined.bc'] + args, stdout=PIPE, stderr=PIPE).communicate()
syms = Building.llvm_nm('combined.bc')
assert len(syms.defs) == 2 and 'main' in syms.defs, 'Failed to generate valid bitcode'
output = Popen(['python', compiler, 'combined.bc', '-o', 'combined.bc.js'], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('combined.bc.js'), 'Expected %s to exist' % ('combined.bc.js')
self.assertContained('side got: hello from main, over', run_js('combined.bc.js'))
# --js-transform <transform>
self.clear()
trans = os.path.join(self.get_dir(), 't.py')
trans_file = open(trans, 'w')
trans_file.write('''
import sys
f = open(sys.argv[1], 'w')
f.write('transformed!')
f.close()
''')
trans_file.close()
output = Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix), '--js-transform', 'python t.py'], stdout=PIPE, stderr=PIPE).communicate()
assert open('a.out.js').read() == 'transformed!', 'Transformed output must be as expected'
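# A --js-transform command receives the path of the generated JS as its single
# argument and may rewrite the file in place. A more realistic transform than
# the overwrite above would append to it (a sketch, not part of this test):
#   import sys
#   f = open(sys.argv[1], 'a')
#   f.write('\n// injected by transform\n')
#   f.close()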
# TODO: Add in files test a clear example of using disablePermissions, and link to it from the wiki
# TODO: test normal project linking, static and dynamic: get_library should not need to be told what to link!
# TODO: deprecate llvm optimizations, dlmalloc, etc. in emscripten.py.
def test_l_link(self):
# Linking with -lLIBNAME and -L/DIRNAME should work
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'libdir'))
except:
pass
open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-c']).communicate()
shutil.move(os.path.join(self.get_dir(), 'libfile.o'), os.path.join(self.get_dir(), 'libdir', 'libfile.so'))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile']).communicate()
self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
assert not os.path.exists('a.out') and not os.path.exists('a.exe'), 'Must not leave unneeded linker stubs'
def test_local_link(self):
# Linking a local library directly, like /usr/lib/libsomething.so, cannot work of course since it
# doesn't contain bitcode. However, when we see that, we should look for a bitcode file for that
# library in the -L paths and system/lib
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'subdir'))
except:
pass
open(os.path.join(self.get_dir(), 'subdir', 'libfile.so'), 'w').write('this is not llvm bitcode!')
open(os.path.join(self.get_dir(), 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'libfile.cpp'), '-o', 'libfile.so']).communicate()
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), os.path.join(self.get_dir(), 'subdir', 'libfile.so'), '-L.']).communicate()
self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_identical_basenames(self):
# Issue 287: files in different dirs but with the same basename get confused as the same,
# causing multiply defined symbol errors
try:
os.makedirs(os.path.join(self.get_dir(), 'foo'))
except:
pass
try:
os.makedirs(os.path.join(self.get_dir(), 'bar'))
except:
pass
open(os.path.join(self.get_dir(), 'foo', 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
open(os.path.join(self.get_dir(), 'bar', 'main.cpp'), 'w').write('''
#include<stdio.h>
void printey() { printf("hello there\\n"); }
''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), os.path.join(self.get_dir(), 'bar', 'main.cpp')]).communicate()
self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# ditto with first creating .o files
try_delete(os.path.join(self.get_dir(), 'a.out.js'))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'foo', 'main.o')]).communicate()
Popen(['python', EMCC, os.path.join(self.get_dir(), 'bar', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo', 'main.o'), os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_embed_file(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''hello from a file with lots of data and stuff in it thank you very much''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
return 0;
}
''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'somefile.txt']).communicate()
self.assertContained('|hello from a file wi|', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_multidynamic_link(self):
# Linking the same dynamic library in twice will normally error, since we statically link it, causing duplicate symbols
# A workaround is to use --ignore-dynamic-linking, see emcc --help for details
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
extern void printey();
extern void printother();
int main() {
printf("*");
printey();
printf("\n");
printother();
printf("\n");
printf("*");
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'libdir'))
except:
pass
open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib");
}
''')
open(os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), 'w').write('''
#include <stdio.h>
extern void printey();
void printother() {
printf("|");
printey();
printf("|");
}
''')
# This lets us link the same dynamic lib twice. We will need to link it in manually at the end.
compiler = ['python', EMCC, '--ignore-dynamic-linking']
# Build libfile normally into an .so
Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-o', os.path.join(self.get_dir(), 'libdir', 'libfile.so')]).communicate()
# Build libother and dynamically link it to libfile - but add --ignore-dynamic-linking
Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-o', os.path.join(self.get_dir(), 'libdir', 'libother.so')]).communicate()
# Build the main file, linking in both the libs
Popen(compiler + [os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-lother', '-c']).communicate()
# The normal build system is over. We need to do an additional step to link in the dynamic libraries, since we ignored them before
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.o'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-lother']).communicate()
self.assertContained('*hello from lib\n|hello from lib|\n*', run_js(os.path.join(self.get_dir(), 'a.out.js')))
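# The flow above, as shell commands (a sketch; paths match this test's layout):
#   emcc --ignore-dynamic-linking libdir/libfile.cpp -o libdir/libfile.so
#   emcc --ignore-dynamic-linking libdir/libother.cpp -Llibdir -lfile -o libdir/libother.so
#   emcc --ignore-dynamic-linking main.cpp -Llibdir -lfile -lother -c
#   emcc main.o -Llibdir -lfile -lother    # the final link pulls the libs in for real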
def test_js_link(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'before.js'), 'w').write('''
var MESSAGE = 'hello from js';
if (typeof Module != 'undefined') throw 'This code should run before anything else!';
''')
open(os.path.join(self.get_dir(), 'after.js'), 'w').write('''
Module.print(MESSAGE);
''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'before.js', '--post-js', 'after.js']).communicate()
self.assertContained('hello from main\nhello from js\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_prepost(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
postRun: function() { Module.print('post-run') }
};
''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js']).communicate()
self.assertContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# never run, so no preRun or postRun
src = open(os.path.join(self.get_dir(), 'a.out.js')).read().replace('// {{PRE_RUN_ADDITIONS}}', 'addRunDependency()')
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
self.assertNotContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# noInitialRun prevents run
for no_initial_run in [0, 1]:
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp')]).communicate()
src = 'var Module = { noInitialRun: %d };\n' % no_initial_run + open(os.path.join(self.get_dir(), 'a.out.js')).read()
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
assert ('hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js'))) != no_initial_run, 'only run if no noInitialRun'
if no_initial_run:
# Calling main later should still work, filesystem etc. must be set up.
src = open(os.path.join(self.get_dir(), 'a.out.js')).read() + '\n_main();\n'
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
assert 'hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js')), 'main should print when called manually'
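# Note: the addRunDependency() injected into PRE_RUN_ADDITIONS above blocks run()
# from ever firing (the dependency is never removed), which is why neither the
# preRun nor the postRun output appears in that case.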
def test_eliminator(self):
input = open(path_from_root('tools', 'eliminator', 'eliminator-test.js')).read()
expected = open(path_from_root('tools', 'eliminator', 'eliminator-test-output.js')).read()
output = Popen([NODE_JS, COFFEESCRIPT, VARIABLE_ELIMINATOR], stdin=PIPE, stdout=PIPE).communicate(input)[0]
self.assertIdentical(expected, output)
def test_fix_closure(self):
input = path_from_root('tests', 'test-fix-closure.js')
expected = path_from_root('tests', 'test-fix-closure.out.js')
Popen(['python', path_from_root('tools', 'fix_closure.py'), input, 'out.js']).communicate()
output = open('out.js').read()
assert '0,zzz_Q_39fa,0' in output
assert 'function(a,c)' not in output # should be uninlined, so it gets a name
assert run_js(input) == run_js('out.js')
def test_js_optimizer(self):
for input, expected, passes in [
(path_from_root('tools', 'test-js-optimizer.js'), open(path_from_root('tools', 'test-js-optimizer-output.js')).read(),
['hoistMultiples', 'loopOptimizer', 'unGlobalize', 'removeAssignsToUndefined', 'simplifyExpressionsPre', 'simplifyExpressionsPost']),
(path_from_root('tools', 'test-js-optimizer-t2c.js'), open(path_from_root('tools', 'test-js-optimizer-t2c-output.js')).read(),
['simplifyExpressionsPre', 'optimizeShiftsConservative']),
(path_from_root('tools', 'test-js-optimizer-t2.js'), open(path_from_root('tools', 'test-js-optimizer-t2-output.js')).read(),
['simplifyExpressionsPre', 'optimizeShiftsAggressive']),
]:
output = Popen([NODE_JS, JS_OPTIMIZER, input] + passes, stdin=PIPE, stdout=PIPE).communicate()[0]
self.assertIdentical(expected, output.replace('\n\n', '\n'))
def test_m_mm(self):
open(os.path.join(self.get_dir(), 'foo.c'), 'w').write('''#include <emscripten.h>''')
for opt in ['M', 'MM']:
output, err = Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo.c'), '-' + opt], stdout=PIPE, stderr=PIPE).communicate()
assert 'foo.o: ' in output, '-%s failed to produce the right output: %s' % (opt, output)
assert 'error' not in err, 'Unexpected stderr: ' + err
def test_llvm_nativizer(self):
# avoid impure_ptr problems etc.
shutil.copyfile(path_from_root('tests', 'files.cpp'), os.path.join(self.get_dir(), 'files.cpp'))
open(os.path.join(self.get_dir(), 'somefile.binary'), 'w').write('''waka waka############################''')
open(os.path.join(self.get_dir(), 'test.file'), 'w').write('''ay file..............,,,,,,,,,,,,,,''')
open(os.path.join(self.get_dir(), 'stdin'), 'w').write('''inter-active''')
Popen(['python', EMCC, os.path.join(self.get_dir(), 'files.cpp'), '-c']).communicate()
Popen(['python', path_from_root('tools', 'nativize_llvm.py'), os.path.join(self.get_dir(), 'files.o')]).communicate()
output = Popen([os.path.join(self.get_dir(), 'files.o.run')], stdin=open(os.path.join(self.get_dir(), 'stdin')), stdout=PIPE, stderr=PIPE).communicate()
self.assertIdentical('''size: 37
data: 119,97,107,97,32,119,97,107,97,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35
loop: 119 97 107 97 32 119 97 107 97 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35
input:inter-active
texto
$
5 : 10,30,20,11,88
other=ay file...
seeked= file.
seeked=e...
seeked=,,.
fscanfed: 10 - hello
''', output[0])
self.assertIdentical('texte\n', output[1])
elif 'browser' in str(sys.argv):
# Browser tests.
class browser(RunnerCore):
def __init__(self, *args, **kwargs):
super(browser, self).__init__(*args, **kwargs)
if hasattr(browser, 'harness_server'): return
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
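# The harness page polls this server on port 9999; each individual test then
# serves its own files, and receives /report_result callbacks, on port 8888
# (see run_browser below).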
def server_func(q):
class TestServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
if s.path == '/run_harness':
s.wfile.write(open(path_from_root('tests', 'browser_harness.html')).read())
else:
result = 'False'
if not q.empty():
result = q.get()
s.wfile.write(result)
s.wfile.close()
httpd = BaseHTTPServer.HTTPServer(('localhost', 9999), TestServerHandler)
httpd.serve_forever() # test runner will kill us
browser.harness_queue = multiprocessing.Queue()
browser.harness_server = multiprocessing.Process(target=server_func, args=(browser.harness_queue,))
browser.harness_server.start()
print '[Browser harness server on process %d]' % browser.harness_server.pid
webbrowser.open_new('http://localhost:9999/run_harness')
def __del__(self):
if not hasattr(browser, 'harness_server'): return
browser.harness_server.terminate()
delattr(browser, 'harness_server')
print '[Browser harness server terminated]'
def run_browser(self, html_file, message, expectedResult=None):
if expectedResult is not None:
try:
def server_func(q):
class TestServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(s):
if 'report_' in s.path:
q.put(s.path)
else:
filename = s.path[1:]
if os.path.exists(filename):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(open(filename).read())
s.wfile.close()
else:
s.send_response(500)
s.send_header("Content-type", "text/html")
s.end_headers()
os.chdir(self.get_dir())
httpd = BaseHTTPServer.HTTPServer(('localhost', 8888), TestServerHandler)
httpd.serve_forever() # test runner will kill us
queue = multiprocessing.Queue()
server = multiprocessing.Process(target=server_func, args=(queue,))
server.start()
browser.harness_queue.put('http://localhost:8888/' + html_file)
output = '[no http server activity]'
start = time.time()
while time.time() - start < 5:
if not queue.empty():
output = queue.get()
break
time.sleep(0.1)
self.assertIdentical(expectedResult, output)
finally:
server.terminate()
else:
webbrowser.open_new(os.path.abspath(html_file))
print 'A web browser window should have opened a page containing the results of a part of this test.'
print 'You need to manually look at the page to see that it works ok: ' + message
print '(sleeping for a bit to keep the directory alive for the web browser..)'
time.sleep(5)
print '(moving on..)'
def with_report_result(self, code):
return code.replace('REPORT_RESULT();', '''
char output[1000];
sprintf(output,
"xhr = new XMLHttpRequest();"
"xhr.open('GET', 'http://localhost:8888/report_result?%d');"
"xhr.send();", result);
emscripten_run_script(output);
emscripten_run_script("setTimeout(function() { window.close() }, 1000)");
''')
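# Usage sketch: a test's C source computes `int result = ...;` and then calls
# REPORT_RESULT(); the replacement above turns that into an XHR to
# /report_result?<result>, which run_browser() matches against expectedResult.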
def reftest(self, expected):
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'reftest.js'), 'w').write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
for (var x = 0; x < img.width; x++) {
for (var y = 0; y < img.height; y++) {
total += Math.abs(expected[y*img.width*4 + x*4 + 0] - actual[y*img.width*4 + x*4 + 0]);
total += Math.abs(expected[y*img.width*4 + x*4 + 1] - actual[y*img.width*4 + x*4 + 1]);
total += Math.abs(expected[y*img.width*4 + x*4 + 2] - actual[y*img.width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + wrong);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
Module['postRun'] = doReftest;
Module['preRun'] = function() {
setTimeout(doReftest, 0); // if run() throws an exception and postRun is not called, this will kick in
};
''' % basename)
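# The reftest score reported is the mean absolute per-channel (RGB) difference
# against the expected image, floored to allow some antialiasing noise, so
# '/report_result?0' means the canvas matches the reference image.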
def test_html(self):
# test HTML generation.
self.reftest(path_from_root('tests', 'htmltest.png'))
output = Popen(['python', EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.html', '--pre-js', 'reftest.js']).communicate()
self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
def test_compression(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
'''))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
# by individual files
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_compressed_file(self):
open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_image(self):
# load an image file, get pixel data
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '--preload-file', 'screenshot.jpg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
(path_from_root('tests', 'screenshot.jpg'), 600)]:
self.clear()
print image
basename = os.path.basename(image)
shutil.copyfile(image, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()).replace('screenshot.jpg', basename))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '--preload-file', basename, '-o', 'page.html',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_canvas(self):
open(os.path.join(self.get_dir(), 'sdl_canvas.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_canvas.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_canvas.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_key(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
_one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
dispatchEvent(event);
var event2 = document.createEvent("KeyboardEvent");
event2.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
dispatchEvent(event2);
}
''')
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?510510')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?740')
def test_sdl_audio(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmcreatemiltaryfoot_1.wav'), os.path.join(self.get_dir(), 'sound2.wav'))
open(os.path.join(self.get_dir(), 'sdl_audio.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio.c')).read()))
# use closure to check for a possible bug with closure minifying away newer Audio() attributes
Popen(['python', EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio.c'), '--preload-file', 'sound.ogg', '--preload-file', 'sound2.wav', '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play", "_play2"]']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_worker(self):
# Test running in a web worker
output = Popen(['python', EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
self.run_browser('main.html', 'You should see that the worker was called, and said "hello from worker!"', '/report_result?hello%20from%20worker!')
def test_glgears(self):
self.reftest(path_from_root('tests', 'gears.png'))
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '--pre-js', 'reftest.js']).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_result?0')
def test_glgears_animation(self):
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')]).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_glgears_bad(self):
# Make sure that OpenGL ES is not available if typed arrays are not used
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS',
'-s', 'USE_TYPED_ARRAYS=0',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')]).communicate()
self.run_browser('something.html', 'You should not see animating gears.', '/report_gl_result?false')
def test_glgears_deriv(self):
self.reftest(path_from_root('tests', 'gears.png'))
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles_deriv.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '--pre-js', 'reftest.js']).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_result?0')
src = open('something.html').read()
assert 'gl-matrix' not in src, 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
for program in programs:
print program
basename = os.path.basename(program)
args = []
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.reftest(path_from_root('tests', 'glbook', basename.replace('.bc', '.png')))
Popen(['python', EMCC, program, '-o', 'program.html', '--pre-js', 'reftest.js'] + args).communicate()
self.run_browser('program.html', '', '/report_result?0')
elif 'benchmark' in str(sys.argv):
# Benchmarks. Run them with argument |benchmark|. To run a specific test, do
# |benchmark.test_X|.
fingerprint = [time.asctime()]
try:
fingerprint.append('em: ' + Popen(['git', 'show'], stdout=PIPE).communicate()[0].split('\n')[0])
except:
pass
try:
d = os.getcwd()
os.chdir(os.path.expanduser('~/Dev/mozilla-central'))
fingerprint.append('sm: ' + filter(lambda line: 'changeset' in line,
Popen(['hg', 'tip'], stdout=PIPE).communicate()[0].split('\n'))[0])
except:
pass
finally:
os.chdir(d)
fingerprint.append('llvm: ' + LLVM_ROOT)
print 'Running Emscripten benchmarks... [ %s ]' % ' | '.join(fingerprint)
sys.argv = filter(lambda x: x != 'benchmark', sys.argv)
assert(os.path.exists(CLOSURE_COMPILER))
try:
index = SPIDERMONKEY_ENGINE.index("options('strict')")
SPIDERMONKEY_ENGINE = SPIDERMONKEY_ENGINE[:index-1] + SPIDERMONKEY_ENGINE[index+1:] # closure generates non-strict
except:
pass
Building.COMPILER = CLANG
# Pick the JS engine to benchmark. You can specify one on the command line, for example: python tests/runner.py benchmark SPIDERMONKEY_ENGINE
JS_ENGINE = JS_ENGINES[0]
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
if not arg.startswith('benchmark.test_'):
JS_ENGINE = eval(arg)
sys.argv[i] = None
sys.argv = filter(lambda arg: arg is not None, sys.argv)
print 'Benchmarking JS engine:', JS_ENGINE
Building.COMPILER_TEST_OPTS = []
TEST_REPS = 10
TOTAL_TESTS = 9
tests_done = 0
total_times = map(lambda x: 0., range(TOTAL_TESTS))
total_native_times = map(lambda x: 0., range(TOTAL_TESTS))
class benchmark(RunnerCore):
def print_stats(self, times, native_times, last=False):
mean = sum(times)/len(times)
squared_times = map(lambda x: x*x, times)
mean_of_squared = sum(squared_times)/len(times)
std = math.sqrt(mean_of_squared - mean*mean)
sorted_times = times[:]
sorted_times.sort()
median = sum(sorted_times[len(sorted_times)/2 - 1:len(sorted_times)/2 + 1])/2
mean_native = sum(native_times)/len(native_times)
squared_native_times = map(lambda x: x*x, native_times)
mean_of_squared_native = sum(squared_native_times)/len(native_times)
std_native = math.sqrt(mean_of_squared_native - mean_native*mean_native)
sorted_native_times = native_times[:]
sorted_native_times.sort()
median_native = sum(sorted_native_times[len(sorted_native_times)/2 - 1:len(sorted_native_times)/2 + 1])/2
final = mean / mean_native
if last:
norm = 0
for i in range(len(times)):
norm += times[i]/native_times[i]
norm /= len(times)
print
print ' JavaScript: %.3f Native: %.3f Ratio: %.3f Normalized ratio: %.3f' % (mean, mean_native, final, norm)
return
print
print ' JavaScript: mean: %.3f (+-%.3f) secs median: %.3f range: %.3f-%.3f (noise: %3.3f%%) (%d runs)' % (mean, std, median, min(times), max(times), 100*std/mean, TEST_REPS)
print ' Native : mean: %.3f (+-%.3f) secs median: %.3f range: %.3f-%.3f (noise: %3.3f%%) JS is %.2f X slower' % (mean_native, std_native, median_native, min(native_times), max(native_times), 100*std_native/mean_native, final)
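# (std above uses the identity Var(x) = E[x^2] - E[x]^2, i.e. the population
# standard deviation over the TEST_REPS samples.)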
def do_benchmark(self, src, args=[], expected_output='FAIL', emcc_args=[]):
dirname = self.get_dir()
filename = os.path.join(dirname, 'src.cpp')
f = open(filename, 'w')
f.write(src)
f.close()
final_filename = os.path.join(dirname, 'src.js')
try_delete(final_filename)
output = Popen(['python', EMCC, filename, '-O3',
'-s', 'INLINING_LIMIT=0',
'-s', 'TOTAL_MEMORY=100*1024*1024', '-s', 'FAST_MEMORY=10*1024*1024',
'-o', final_filename] + emcc_args, stdout=PIPE, stderr=self.stderr_redirect).communicate()
assert os.path.exists(final_filename), 'Failed to compile file: ' + '\n'.join(output)
# Run JS
global total_times, tests_done
times = []
for i in range(TEST_REPS):
start = time.time()
js_output = self.run_generated_code(JS_ENGINE, final_filename, args, check_timeout=False)
curr = time.time()-start
times.append(curr)
total_times[tests_done] += curr
if i == 0:
# Sanity check on output
self.assertContained(expected_output, js_output)
# Run natively
self.build_native(filename)
global total_native_times
native_times = []
for i in range(TEST_REPS):
start = time.time()
self.run_native(filename, args)
curr = time.time()-start
native_times.append(curr)
total_native_times[tests_done] += curr
self.print_stats(times, native_times)
tests_done += 1
if tests_done == TOTAL_TESTS:
print 'Total stats:',
self.print_stats(total_times, total_native_times, last=True)
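# To add a benchmark (sketch): bump TOTAL_TESTS and add a test_* method, e.g.
#   def test_mybench(self):                                      # hypothetical
#     src = open(path_from_root('tests', 'mybench.cpp')).read()  # hypothetical file
#     self.do_benchmark(src, ['10'], 'expected output')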
def test_primes(self):
src = '''
#include<stdio.h>
#include<math.h>
int main() {
int primes = 0, curri = 2;
while (primes < 100000) {
int ok = true;
for (int j = 2; j < sqrtf(curri); j++) {
if (curri % j == 0) {
ok = false;
break;
}
}
if (ok) {
primes++;
}
curri++;
}
printf("lastprime: %d.\\n", curri-1);
return 1;
}
'''
self.do_benchmark(src, [], 'lastprime: 1297001.')
def test_memops(self):
src = '''
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
int main() {
int N = 1024*1024;
int M = 190;
int final = 0;
char *buf = (char*)malloc(N);
for (int t = 0; t < M; t++) {
for (int i = 0; i < N; i++)
buf[i] = (i + final)%256;
for (int i = 0; i < N; i++)
final += buf[i] & 1;
final = final % 1000;
}
printf("final: %d.\\n", final);
return 1;
}
'''
self.do_benchmark(src, [], 'final: 720.')
def zzztest_files(self):
src = r'''
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include <unistd.h>
int main() {
int N = 100;
int M = 1000;
int K = 1000;
unsigned char *k = (unsigned char*)malloc(K+1), *k2 = (unsigned char*)malloc(K+1);
for (int i = 0; i < K; i++) {
k[i] = (i % 250) + 1;
}
k[K] = 0;
char buf[100];
for (int i = 0; i < N; i++) {
sprintf(buf, "/dev/shm/file-%d.dat", i);
FILE *f = fopen(buf, "w");
for (int j = 0; j < M; j++) {
fwrite(k, 1, (j % K) + 1, f);
}
fclose(f);
}
for (int i = 0; i < N; i++) {
sprintf(buf, "/dev/shm/file-%d.dat", i);
FILE *f = fopen(buf, "r");
for (int j = 0; j < M; j++) {
fread(k2, 1, (j % K) + 1, f);
}
fclose(f);
for (int j = 0; j < K; j++) {
assert(k[j] == k2[j]);
}
unlink(buf);
}
printf("ok");
return 1;
}
'''
self.do_benchmark(src, [], 'ok')
def test_copy(self):
src = r'''
#include<stdio.h>
struct vec {
int x, y, z;
int r, g, b;
vec(int x_, int y_, int z_, int r_, int g_, int b_) : x(x_), y(y_), z(z_), r(r_), g(g_), b(b_) {}
static vec add(vec a, vec b) {
return vec(a.x+b.x, a.y+b.y, a.z+b.z, a.r+b.r, a.g+b.g, a.b+b.b);
}
void norm() {
x %= 1024;
y %= 1024;
z %= 1024;
r %= 1024;
b %= 1024;
g %= 1024;
}
int sum() { return x + y + z + r + g + b; }
};
int main() {
int total = 0;
for (int i = 0; i < 1250; i++) {
for (int j = 0; j < 1000; j++) {
vec c(i, i+i%10, j*2, i%255, j%120, i%15);
vec d(j+i%10, j*2, j%255, i%120, j%15, j);
vec e = c;
c.norm();
d.norm();
vec f = vec::add(c, d);
f = vec::add(e, f);
f.norm();
f = vec::add(d, f);
total += f.sum() % 100;
total %= 10240;
}
}
printf("sum:%d\n", total);
return 1;
}
'''
self.do_benchmark(src, [], 'sum:9928\n', emcc_args=['-s', 'QUANTUM_SIZE=4', '-s', 'USE_TYPED_ARRAYS=2'])
def test_fannkuch(self):
src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read()
self.do_benchmark(src, ['10'], 'Pfannkuchen(10) = 38.')
def test_corrections(self):
src = r'''
#include<stdio.h>
#include<math.h>
int main() {
int N = 4100;
int M = 4100;
unsigned int f = 0;
unsigned short s = 0;
for (int t = 0; t < M; t++) {
for (int i = 0; i < N; i++) {
f += i / ((t % 5)+1);
if (f > 1000) f /= (t % 3)+1;
if (i % 4 == 0) f += sqrtf(i) * (i % 8 == 0 ? 1 : -1);
s += (short(f)*short(f)) % 256;
}
}
printf("final: %d:%d.\n", f, s);
return 1;
}
'''
self.do_benchmark(src, [], 'final: 826:14324.', emcc_args=['-s', 'CORRECT_SIGNS=1', '-s', 'CORRECT_OVERFLOWS=1', '-s', 'CORRECT_ROUNDINGS=1'])
def fasta(self, double_rep):
src = open(path_from_root('tests', 'fasta.cpp'), 'r').read().replace('double', double_rep)
self.do_benchmark(src, ['2100000'], '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA\nTCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT\nAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG\nGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG\nCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT\nGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA\nGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAA\nTTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAG\nAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCA\nGCCTGGGCGA''')
def test_fasta_float(self):
self.fasta('float')
def zzztest_fasta_double(self):
self.fasta('double')
def test_skinning(self):
src = open(path_from_root('tests', 'skinning_test_no_simd.cpp'), 'r').read()
self.do_benchmark(src, ['10000', '1000'], 'blah=0.000000')
def test_dlmalloc(self):
# XXX This seems to have regressed slightly with emcc. Are -g and the signs lines passed properly?
src = open(path_from_root('system', 'lib', 'dlmalloc.c'), 'r').read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
self.do_benchmark(src, ['400', '400'], '*400,0*', emcc_args=['-g', '-s', 'CORRECT_SIGNS=2', '-s', 'CORRECT_SIGNS_LINES=[4820, 4195, 4250, 4203, 4209, 4239, 4231]'])
elif 'sanity' in str(sys.argv):
# Run some sanity checks on the test runner and emcc.
sys.argv = filter(lambda x: x != 'sanity', sys.argv)
print
print 'Running sanity checks.'
print 'WARNING: This will modify %s, and in theory can break it although it should be restored properly. A backup will be saved in %s_backup' % (EM_CONFIG, EM_CONFIG)
print
assert os.path.exists(CONFIG_FILE), 'To run these tests, we need a (working!) %s file to already exist' % EM_CONFIG
shutil.copyfile(CONFIG_FILE, CONFIG_FILE + '_backup')
def restore():
shutil.copyfile(CONFIG_FILE + '_backup', CONFIG_FILE)
SANITY_FILE = CONFIG_FILE + '_sanity'
def wipe():
try_delete(CONFIG_FILE)
try_delete(SANITY_FILE)
commands = [[EMCC], ['python', path_from_root('tests', 'runner.py'), 'blahblah']]
def mtime(filename):
return os.stat(filename).st_mtime
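# The sanity tests below exercise emcc's first-run logic against a scratch
# EM_CONFIG: wipe() simulates a first run, restore() brings back the real
# config saved to CONFIG_FILE + '_backup' above.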
class sanity(RunnerCore):
def setUp(self):
wipe()
def tearDown(self):
restore()
def do(self, command):
if type(command) is not list:
command = [command]
if command[0] == EMCC:
command = ['python'] + command
return Popen(command, stdout=PIPE, stderr=STDOUT).communicate()[0]
def check_working(self, command, expected=None):
if type(command) is not list:
command = [command]
if expected is None:
if command[0] == EMCC:
expected = 'no input files'
else:
expected = "has no attribute 'blahblah'"
output = self.do(command)
self.assertContained(expected, output)
return output
def test_aaa_normal(self): # this should be the very first thing that runs. if this fails, everything else is irrelevant!
for command in commands:
# Your existing EM_CONFIG should work!
restore()
self.check_working(command)
def test_firstrun(self):
for command in commands:
wipe()
output = self.do(command)
self.assertContained('Welcome to Emscripten!', output)
self.assertContained('This is the first time any of the Emscripten tools has been run.', output)
self.assertContained('A settings file has been copied to %s, at absolute path: %s' % (EM_CONFIG, CONFIG_FILE), output)
self.assertContained('Please edit that file and change the paths to fit your system', output)
self.assertContained('make sure LLVM_ROOT and NODE_JS are correct', output)
self.assertContained('This command will now exit. When you are done editing those paths, re-run it.', output)
assert output.split()[-1].endswith('===='), 'We should have stopped: ' + output
assert (open(CONFIG_FILE).read() == open(path_from_root('settings.py')).read()), 'Settings should be copied from settings.py'
# Second run, with bad EM_CONFIG
for settings in ['blah', 'LLVM_ROOT="blah"; JS_ENGINES=[]; COMPILER_ENGINE=NODE_JS=SPIDERMONKEY_ENGINE=[]']:
f = open(CONFIG_FILE, 'w')
f.write(settings)
f.close()
output = self.do(command)
if 'LLVM_ROOT' not in settings:
self.assertContained('Error in evaluating %s' % EM_CONFIG, output)
else:
self.assertContained('FATAL', output) # sanity check should fail
def test_closure_compiler(self):
CLOSURE_FATAL = 'fatal: Closure compiler'
CLOSURE_WARNING = 'WARNING: Closure compiler'
# Sanity check should find closure
restore()
output = self.check_working(EMCC)
self.assertNotContained(CLOSURE_FATAL, output)
self.assertNotContained(CLOSURE_WARNING, output)
# Append a bad path for closure, will warn
f = open(CONFIG_FILE, 'a')
f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
f.close()
output = self.check_working(EMCC, CLOSURE_WARNING)
# And if you actually try to use the bad path, will be fatal
f = open(CONFIG_FILE, 'a')
f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
f.close()
output = self.check_working([EMCC, '-O2', 'tests/hello_world.cpp'], CLOSURE_FATAL)
# With a working path, all is well
restore()
try_delete('a.out.js')
output = self.check_working([EMCC, '-O2', 'tests/hello_world.cpp'], '')
assert os.path.exists('a.out.js')
def test_emcc(self):
SANITY_MESSAGE = 'Emscripten: Running sanity checks'
SANITY_FAIL_MESSAGE = 'sanity check failed to run'
# emcc should check sanity if no ${EM_CONFIG}_sanity
restore()
time.sleep(0.1)
assert not os.path.exists(SANITY_FILE) # restore is just the settings, not the sanity
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
assert os.path.exists(SANITY_FILE) # EMCC should have checked sanity successfully
assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc run again should not sanity check, because the sanity file is newer
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# But the test runner should
output = self.check_working(commands[1])
self.assertContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# Make sure the test runner didn't do anything to the setup
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc should also check sanity if the file is outdated
time.sleep(0.1)
restore()
assert mtime(SANITY_FILE) < mtime(CONFIG_FILE)
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
def test_emcc_caching(self):
INCLUDING_MESSAGE = 'emcc: including X'
BUILDING_MESSAGE = 'emcc: building X for cache'
EMCC_CACHE = Cache.dirname
restore()
Cache.erase()
assert not os.path.exists(EMCC_CACHE)
try:
emcc_debug = os.environ.get('EMCC_DEBUG')
      os.environ['EMCC_DEBUG'] = '1'
# Building a file that doesn't need cached stuff should not trigger cache generation
output = self.do([EMCC, path_from_root('tests', 'hello_world.cpp')])
assert INCLUDING_MESSAGE.replace('X', 'dlmalloc') not in output
assert BUILDING_MESSAGE.replace('X', 'dlmalloc') not in output
self.assertContained('hello, world!', run_js('a.out.js'))
assert not os.path.exists(EMCC_CACHE)
try_delete('a.out.js')
basebc_name = os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-0-basebc.bc')
dcebc_name = os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-1-dce.bc')
# Building a file that *does* need dlmalloc *should* trigger cache generation, but only the first time
for filename, libname in [('hello_malloc.cpp', 'dlmalloc'), ('hello_libcxx.cpp', 'libcxx')]:
for i in range(3):
try_delete(basebc_name) # we might need to check this file later
try_delete(dcebc_name) # we might need to check this file later
output = self.do([EMCC, path_from_root('tests', filename)])
assert INCLUDING_MESSAGE.replace('X', libname) in output
if libname == 'dlmalloc':
assert INCLUDING_MESSAGE.replace('X', 'libcxx') not in output # we don't need libcxx in this code
else:
assert INCLUDING_MESSAGE.replace('X', 'dlmalloc') in output # libcxx always forces inclusion of dlmalloc
assert (BUILDING_MESSAGE.replace('X', libname) in output) == (i == 0), 'Must only build the first time'
self.assertContained('hello, world!', run_js('a.out.js'))
assert os.path.exists(EMCC_CACHE)
assert os.path.exists(os.path.join(EMCC_CACHE, libname + '.bc'))
if libname == 'libcxx':
assert os.stat(os.path.join(EMCC_CACHE, libname + '.bc')).st_size > 4000000, 'libc++ is big'
assert os.stat(basebc_name).st_size > 4000000, 'libc++ is indeed big'
assert os.stat(dcebc_name).st_size < 2000000, 'Dead code elimination must remove most of libc++'
    finally:
      if emcc_debug:
        os.environ['EMCC_DEBUG'] = emcc_debug
      else:
        del os.environ['EMCC_DEBUG']
else:
raise Exception('Test runner is confused: ' + str(sys.argv))
if __name__ == '__main__':
sys.argv = [sys.argv[0]] + ['-v'] + sys.argv[1:] # Verbose output by default
# Sanity checks
check_sanity(force=True)
total_engines = len(JS_ENGINES)
JS_ENGINES = filter(check_engine, JS_ENGINES)
if len(JS_ENGINES) == 0:
print 'WARNING: None of the JS engines in JS_ENGINES appears to work.'
elif len(JS_ENGINES) < total_engines:
    print 'WARNING: Not all the JS engines in JS_ENGINES appear to work, ignoring those that do not.'
# Go
unittest.main()
|
gdaltest_python2.py
|
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Python Library supporting GDAL/OGR Test Suite
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import urllib2
import socket
import os
import subprocess
import shlex
import sys
from Queue import Queue
from threading import Thread
def run_func(func):
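    """Run a single test function, print its result, and return it.
    SystemExit propagates (with a traceback printed); any other exception
    is reported as 'fail (blowup)'."""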
try:
result = func()
print(result)
return result
except SystemExit as x:
import traceback
traceback.print_exc()
raise x
except Exception: # pylint: disable=broad-except
result = 'fail (blowup)'
print(result)
import traceback
traceback.print_exc()
return result
def urlescape(url):
    # Percent-encode any non-ASCII (or otherwise unsafe) characters
try:
import urllib
url = urllib.quote(url)
except ImportError:
pass
return url
def gdalurlopen(url, timeout=10):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
proxy = None
if 'GDAL_HTTP_PROXY' in os.environ:
proxy = os.environ['GDAL_HTTP_PROXY']
protocol = 'http'
if 'GDAL_HTTPS_PROXY' in os.environ and url.startswith('https'):
proxy = os.environ['GDAL_HTTPS_PROXY']
protocol = 'https'
if proxy is not None:
if 'GDAL_HTTP_PROXYUSERPWD' in os.environ:
proxyuserpwd = os.environ['GDAL_HTTP_PROXYUSERPWD']
proxyHandler = urllib2.ProxyHandler({"%s" % protocol:
"%s://%s@%s" % (protocol, proxyuserpwd, proxy)})
else:
proxyuserpwd = None
proxyHandler = urllib2.ProxyHandler({"%s" % protocol:
"%s://%s" % (protocol, proxy)})
opener = urllib2.build_opener(proxyHandler, urllib2.HTTPHandler)
urllib2.install_opener(opener)
try:
handle = urllib2.urlopen(url)
socket.setdefaulttimeout(old_timeout)
return handle
except urllib2.HTTPError as e:
print('HTTP service for %s is down (HTTP Error: %d)' % (url, e.code))
socket.setdefaulttimeout(old_timeout)
return None
except urllib2.URLError as e:
print('HTTP service for %s is down (HTTP Error: %s)' % (url, e.reason))
socket.setdefaulttimeout(old_timeout)
return None
except socket.timeout:
print('HTTP service for %s is down (timeout)' % url)
socket.setdefaulttimeout(old_timeout)
return None
def warn_if_memleak(cmd, out_str):
# If DEBUG_VSIMALLOC_STATS is defined, this is an easy way
# to catch some memory leaks
if cmd.find('--utility_version') == -1 and \
out_str.find('VSIMalloc + VSICalloc - VSIFree') != -1 and \
out_str.find('VSIMalloc + VSICalloc - VSIFree : 0') == -1:
print('memory leak detected')
print(out_str)
def spawn_async(cmd):
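    """Spawn `cmd` without waiting for it; return (process, stdout pipe),
    or (None, None) if the command could not be started."""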
command = shlex.split(cmd)
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE)
return (process, process.stdout)
except OSError:
return (None, None)
def wait_process(process):
process.wait()
def runexternal(cmd, strin=None, check_memleak=True, display_live_on_parent_stdout=False, encoding=None):
# pylint: disable=unused-argument
command = shlex.split(cmd)
command = [elt.replace('\x00', '') for elt in command]
if strin is None:
p = subprocess.Popen(command, stdout=subprocess.PIPE)
else:
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(strin)
p.stdin.close()
if p.stdout is not None:
if display_live_on_parent_stdout:
ret = ''
ret_stdout = p.stdout
while True:
c = ret_stdout.read(1)
if c == '':
break
ret = ret + c
sys.stdout.write(c)
else:
ret = p.stdout.read()
else:
ret = ''
waitcode = p.wait()
if waitcode != 0:
ret = ret + '\nERROR ret code = %d' % waitcode
if encoding is not None:
ret = ret.decode(encoding)
return ret
def read_in_thread(f, q):
q.put(f.read())
f.close()
# Compatible with Python 2.6 or above
def _runexternal_out_and_err_subprocess(cmd, check_memleak=True, encoding=None):
# pylint: disable=unused-argument
command = shlex.split(cmd)
command = [elt.replace('\x00', '') for elt in command]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.stdout is not None:
q_stdout = Queue()
t_stdout = Thread(target=read_in_thread, args=(p.stdout, q_stdout))
t_stdout.start()
else:
q_stdout = None
ret_stdout = ''
if p.stderr is not None:
q_stderr = Queue()
t_stderr = Thread(target=read_in_thread, args=(p.stderr, q_stderr))
t_stderr.start()
else:
q_stderr = None
ret_stderr = ''
if q_stdout is not None:
ret_stdout = q_stdout.get()
if q_stderr is not None:
ret_stderr = q_stderr.get()
waitcode = p.wait()
if waitcode != 0:
ret_stderr = ret_stderr + '\nERROR ret code = %d' % waitcode
if encoding is not None:
ret_stdout = ret_stdout.decode(encoding)
return (ret_stdout, ret_stderr)
def runexternal_out_and_err(cmd, check_memleak=True, encoding=None):
return _runexternal_out_and_err_subprocess(cmd, check_memleak=check_memleak, encoding=encoding)
|
execute.py
|
"""
Once state information has been calculated, handle actually executing tools
from various states, tracking results, and building implicit dataset
collections from matched collections.
"""
import collections
import logging
from threading import Thread
from six.moves.queue import Queue
from galaxy.tools.actions import on_text_for_names, ToolExecutionCache
from galaxy.tools.parser import ToolOutputCollectionPart
from galaxy.util import ExecutionTimer
log = logging.getLogger( __name__ )
EXECUTION_SUCCESS_MESSAGE = "Tool [%s] created job [%s] %s"
def execute( trans, tool, param_combinations, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None ):
"""
Execute a tool and return object containing summary (output data, number of
failures, etc...).
"""
all_jobs_timer = ExecutionTimer()
execution_tracker = ToolExecutionTracker( tool, param_combinations, collection_info )
app = trans.app
execution_cache = ToolExecutionCache(trans)
def execute_single_job(params):
job_timer = ExecutionTimer()
if workflow_invocation_uuid:
params[ '__workflow_invocation_uuid__' ] = workflow_invocation_uuid
elif '__workflow_invocation_uuid__' in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params[ '__workflow_invocation_uuid__' ]
job, result = tool.handle_single_execution( trans, rerun_remap_job_id, params, history, collection_info, execution_cache )
if job:
message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
log.debug(message)
execution_tracker.record_success( job, result )
else:
execution_tracker.record_error( result )
config = app.config
burst_at = getattr( config, 'tool_submission_burst_at', 10 )
burst_threads = getattr( config, 'tool_submission_burst_threads', 1 )
tool_action = tool.action
if hasattr( tool_action, "check_inputs_ready" ):
for params in execution_tracker.param_combinations:
# This will throw an exception if the tool is not ready.
tool_action.check_inputs_ready(
tool,
trans,
params,
history
)
job_count = len(execution_tracker.param_combinations)
if job_count < burst_at or burst_threads < 2:
for params in execution_tracker.param_combinations:
execute_single_job(params)
else:
q = Queue()
def worker():
while True:
params = q.get()
execute_single_job(params)
q.task_done()
for i in range(burst_threads):
t = Thread(target=worker)
t.daemon = True
t.start()
for params in execution_tracker.param_combinations:
q.put(params)
q.join()
log.debug("Executed %d job(s) for tool %s request: %s" % (job_count, tool.id, all_jobs_timer))
if collection_info:
history = history or tool.get_default_history_by_trans( trans )
if len(param_combinations) == 0:
template = "Attempting to map over an empty collection, this is not yet implemented. colleciton_info is [%s]"
message = template % collection_info
log.warn(message)
raise Exception(message)
params = param_combinations[0]
execution_tracker.create_output_collections( trans, history, params )
return execution_tracker
class ToolExecutionTracker( object ):
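    """Track the jobs, outputs, and errors produced while executing a tool
    over many parameter combinations, so implicit output collections can be
    assembled afterwards."""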
def __init__( self, tool, param_combinations, collection_info ):
self.tool = tool
self.param_combinations = param_combinations
self.collection_info = collection_info
self.successful_jobs = []
self.failed_jobs = 0
self.execution_errors = []
self.output_datasets = []
self.output_collections = []
self.outputs_by_output_name = collections.defaultdict(list)
self.implicit_collections = {}
def record_success( self, job, outputs ):
self.successful_jobs.append( job )
self.output_datasets.extend( outputs )
for output_name, output_dataset in outputs:
if ToolOutputCollectionPart.is_named_collection_part_name( output_name ):
# Skip known collection outputs, these will be covered by
# output collections.
continue
self.outputs_by_output_name[ output_name ].append( output_dataset )
for job_output in job.output_dataset_collections:
self.outputs_by_output_name[ job_output.name ].append( job_output.dataset_collection )
for job_output in job.output_dataset_collection_instances:
self.output_collections.append( ( job_output.name, job_output.dataset_collection_instance ) )
def record_error( self, error ):
self.failed_jobs += 1
message = "There was a failure executing a job for tool [%s] - %s"
log.warning(message, self.tool.id, error)
self.execution_errors.append( error )
def create_output_collections( self, trans, history, params ):
# TODO: Move this function - it doesn't belong here but it does need
# the information in this class and potential extensions.
if self.failed_jobs > 0:
return []
structure = self.collection_info.structure
        # params is just one sample tool param execution with the parallelized
        # collection replaced by a specific dataset. Need to replace this
        # with the collection and wrap everything up so we can evaluate the
        # output label.
params.update( self.collection_info.collections ) # Replace datasets with source collections for labelling outputs.
collection_names = ["collection %d" % c.hid for c in self.collection_info.collections.values()]
on_text = on_text_for_names( collection_names )
collections = {}
implicit_inputs = list(self.collection_info.collections.items())
for output_name, outputs in self.outputs_by_output_name.items():
if not len( structure ) == len( outputs ):
                # Output does not have the same structure; if all jobs were
                # successfully submitted, this shouldn't have happened.
log.warning( "Problem matching up datasets while attempting to create implicit dataset collections")
continue
output = self.tool.outputs[ output_name ]
element_identifiers = structure.element_identifiers_for_outputs( trans, outputs )
implicit_collection_info = dict(
implicit_inputs=implicit_inputs,
implicit_output_name=output_name,
outputs=outputs
)
try:
output_collection_name = self.tool.tool_action.get_output_name(
output,
dataset=None,
tool=self.tool,
on_text=on_text,
trans=trans,
history=history,
params=params,
incoming=None,
job_params=None,
)
except Exception:
output_collection_name = "%s across %s" % ( self.tool.name, on_text )
child_element_identifiers = element_identifiers[ "element_identifiers" ]
collection_type = element_identifiers[ "collection_type" ]
collection = trans.app.dataset_collections_service.create(
trans=trans,
parent=history,
name=output_collection_name,
element_identifiers=child_element_identifiers,
collection_type=collection_type,
implicit_collection_info=implicit_collection_info,
)
for job in self.successful_jobs:
# TODO: Think through this, may only want this for output
# collections - or we may be already recording data in some
# other way.
if job not in trans.sa_session:
job = trans.sa_session.query( trans.app.model.Job ).get( job.id )
job.add_output_dataset_collection( output_name, collection )
collections[ output_name ] = collection
# Needed to flush the association created just above with
# job.add_output_dataset_collection.
trans.sa_session.flush()
self.implicit_collections = collections
__all__ = ( 'execute', )
|
test_client_reconnect.py
|
from concurrent import futures
import contextlib
import os
import threading
import sys
import grpc
import time
import random
import pytest
from typing import Any, Callable, Optional
from unittest.mock import patch
import ray
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS
import ray.util.client.server.server as ray_client_server
from ray._private.client_mode_hook import disable_client_hook
# At a high level, these tests rely on an extra RPC server sitting
# between the client and the real Ray server to inject errors, drop responses
# and drop requests, i.e. at a high level:
# Ray Client <-> Middleman Server <-> Proxy Server
# Type for middleman hooks used to inject errors
Hook = Callable[[Any], None]
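# Example (hypothetical, for illustration only): a hook that fails every
# response, which a middleman servicer surfaces to the client as a gRPC error:
#
#   def drop_all(_response):
#       raise RuntimeError("injected failure")
#
#   MiddlemanDataServicer(on_response=drop_all)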
class MiddlemanDataServicer(ray_client_pb2_grpc.RayletDataStreamerServicer):
"""
Forwards all requests to the real data servicer. Useful for injecting
errors between a client and server pair.
"""
def __init__(self, on_response: Optional[Hook] = None):
"""
Args:
on_response: Optional hook to inject errors before sending back a
response
"""
self.stub = None
self.on_response = on_response
def set_channel(self, channel: grpc.Channel) -> None:
self.stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
def Datapath(self, request_iterator, context):
try:
for response in self.stub.Datapath(
request_iterator, metadata=context.invocation_metadata()):
if self.on_response:
self.on_response(response)
yield response
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
class MiddlemanLogServicer(ray_client_pb2_grpc.RayletLogStreamerServicer):
"""
Forwards all requests to the real log servicer. Useful for injecting
errors between a client and server pair.
"""
def __init__(self, on_response: Optional[Hook] = None):
"""
Args:
on_response: Optional hook to inject errors before sending back a
response
"""
self.stub = None
self.on_response = on_response
def set_channel(self, channel: grpc.Channel) -> None:
self.stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
def Logstream(self, request_iterator, context):
try:
for response in self.stub.Logstream(
request_iterator, metadata=context.invocation_metadata()):
if self.on_response:
self.on_response(response)
yield response
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
class MiddlemanRayletServicer(ray_client_pb2_grpc.RayletDriverServicer):
"""
Forwards all requests to the raylet driver servicer. Useful for injecting
errors between a client and server pair.
"""
def __init__(self,
on_request: Optional[Hook] = None,
on_response: Optional[Hook] = None):
"""
Args:
on_request: Optional hook to inject errors before forwarding a
request
on_response: Optional hook to inject errors before sending back a
response
"""
self.stub = None
self.on_request = on_request
self.on_response = on_response
def set_channel(self, channel: grpc.Channel) -> None:
self.stub = ray_client_pb2_grpc.RayletDriverStub(channel)
    def _call_inner_function(
            self, request: Any, context,
            method: str) -> Any:
if self.on_request:
self.on_request(request)
try:
response = getattr(self.stub, method)(
request, metadata=context.invocation_metadata())
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
raise
if self.on_response:
self.on_response(response)
return response
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
return self._call_inner_function(request, context, "KVPut")
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
return self._call_inner_function(request, context, "KVGet")
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
return self._call_inner_function(request, context, "KVDel")
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
return self._call_inner_function(request, context, "KVList")
def KVExists(self, request,
context=None) -> ray_client_pb2.KVExistsResponse:
return self._call_inner_function(request, context, "KVExists")
def ListNamedActors(self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
return self._call_inner_function(request, context, "ListNamedActors")
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request: ray_client_pb2.WaitRequest,
context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task: ray_client_pb2.ClientTask,
context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
class MiddlemanServer:
"""
Helper class that wraps the RPC server that middlemans the connection
between the client and the real ray server. Useful for injecting
errors between a client and server pair.
"""
def __init__(self,
listen_addr: str,
real_addr,
on_log_response: Optional[Hook] = None,
on_data_response: Optional[Hook] = None,
on_task_request: Optional[Hook] = None,
on_task_response: Optional[Hook] = None):
"""
Args:
listen_addr: The address the middleman server will listen on
real_addr: The address of the real ray server
on_log_response: Optional hook to inject errors before sending back
a log response
on_data_response: Optional hook to inject errors before sending
back a data response
on_task_request: Optional hook to inject errors before forwarding
a raylet driver request
on_task_response: Optional hook to inject errors before sending
back a raylet driver response
"""
self.listen_addr = listen_addr
self.real_addr = real_addr
self.server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS)
self.task_servicer = MiddlemanRayletServicer(
on_response=on_task_response, on_request=on_task_request)
self.data_servicer = MiddlemanDataServicer(
on_response=on_data_response)
self.logs_servicer = MiddlemanLogServicer(on_response=on_log_response)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
self.task_servicer, self.server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
self.data_servicer, self.server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
self.logs_servicer, self.server)
self.server.add_insecure_port(self.listen_addr)
self.channel = None
self.reset_channel()
def reset_channel(self) -> None:
"""
Manually close and reopen the channel to the real ray server. This
simulates a disconnection between the client and the server.
"""
if self.channel:
self.channel.close()
self.channel = grpc.insecure_channel(
self.real_addr, options=GRPC_OPTIONS)
grpc.channel_ready_future(self.channel)
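        # Note: channel_ready_future returns a Future that is not waited on
        # here; to block until the new channel is actually usable, one would
        # call .result(timeout=...) on it.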
self.task_servicer.set_channel(self.channel)
self.data_servicer.set_channel(self.channel)
self.logs_servicer.set_channel(self.channel)
def start(self) -> None:
self.server.start()
def stop(self, grace: int) -> None:
self.server.stop(grace)
@contextlib.contextmanager
def start_middleman_server(on_log_response=None,
on_data_response=None,
on_task_request=None,
on_task_response=None):
"""
Helper context that starts a middleman server listening on port 10011,
and a ray client server on port 50051.
"""
ray._inside_client_test = True
server = ray_client_server.serve("localhost:50051")
middleman = None
try:
        middleman = MiddlemanServer(
            listen_addr="localhost:10011",
            real_addr="localhost:50051",
            on_log_response=on_log_response,
            on_data_response=on_data_response,
            on_task_request=on_task_request,
            on_task_response=on_task_response)
middleman.start()
ray.init("ray://localhost:10011")
yield middleman, server
finally:
ray._inside_client_test = False
ray.util.disconnect()
if middleman:
middleman.stop(0)
server.stop(0)
del server
start = time.monotonic()
with disable_client_hook():
while ray.is_initialized():
time.sleep(1)
if time.monotonic() - start > 30:
raise RuntimeError("Failed to terminate Ray")
def test_disconnect_during_get():
"""
Disconnect the proxy and the client in the middle of a long running get
"""
@ray.remote
def slow_result():
time.sleep(20)
return 12345
def disconnect(middleman):
time.sleep(3)
middleman.reset_channel()
with start_middleman_server() as (middleman, _):
disconnect_thread = threading.Thread(
target=disconnect, args=(middleman, ))
disconnect_thread.start()
result = ray.get(slow_result.remote())
assert result == 12345
disconnect_thread.join()
def test_valid_actor_state():
"""
Repeatedly inject errors in the middle of mutating actor calls. Check
at the end that the final state of the actor is consistent with what
we would expect had the disconnects not occurred.
"""
@ray.remote
class IncrActor:
def __init__(self):
self.val = 0
def incr(self):
self.val += 1
return self.val
i = 0
# This is to prevent erroring in the initial connection logic.
started = False
def fail_every_seven(_):
# Inject an error every seventh time this method is called
nonlocal i, started
i += 1
if i % 7 == 0 and started:
raise RuntimeError
with start_middleman_server(
on_data_response=fail_every_seven,
on_task_request=fail_every_seven,
on_task_response=fail_every_seven):
started = True
actor = IncrActor.remote()
for _ in range(100):
ref = actor.incr.remote()
assert ray.get(ref) == 100
def test_valid_actor_state_2():
"""
Do a full disconnect (cancel channel) every 11 requests. Failure
happens:
- before request sent: request never reaches server
    - before response received: response never reaches the client
    - while gets are being processed
"""
@ray.remote
class IncrActor:
def __init__(self):
self.val = 0
def incr(self):
self.val += 1
return self.val
i = 0
with start_middleman_server() as (middleman, _):
def fail_every_eleven(_):
nonlocal i
i += 1
if i % 11 == 0:
middleman.reset_channel()
middleman.data_servicer.on_response = fail_every_eleven
middleman.task_servicer.on_request = fail_every_eleven
middleman.task_servicer.on_response = fail_every_eleven
actor = IncrActor.remote()
for _ in range(100):
ref = actor.incr.remote()
assert ray.get(ref) == 100
def test_noisy_puts():
"""
Randomly kills the data channel with 10% chance when receiving response
(requests made it to server, responses dropped) and checks that final
result is still consistent
"""
random.seed(12345)
with start_middleman_server() as (middleman, _):
def fail_randomly(response: ray_client_pb2.DataResponse):
if random.random() < 0.1:
raise RuntimeError
middleman.data_servicer.on_response = fail_randomly
refs = [ray.put(i * 123) for i in range(500)]
results = ray.get(refs)
for i, result in enumerate(results):
assert result == i * 123
def test_client_reconnect_grace_period():
"""
Tests that the client gives up attempting to reconnect the channel
after the grace period expires.
"""
# Lower grace period to 5 seconds to save time
with patch.dict(os.environ, {"RAY_CLIENT_RECONNECT_GRACE_PERIOD": "5"}), \
start_middleman_server() as (middleman, _):
assert ray.get(ray.put(42)) == 42
# Close channel
middleman.channel.close()
start_time = time.time()
with pytest.raises(ConnectionError):
ray.get(ray.put(42))
# Connection error should have been raised within a reasonable
# amount of time. Set to significantly higher than 5 seconds
# to account for reconnect backoff timing
assert time.time() - start_time < 20
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
BasicStorage.py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the basic tests for a storage as described in the official storage API
The most complete and most out-of-date description of the interface is:
http://www.zope.org/Documentation/Developer/Models/ZODB/ZODB_Architecture_Storage_Interface_Info.html
All storages should be able to pass these tests.
"""
import transaction
from ZODB import DB, POSException
from ZODB.Connection import TransactionMetaData
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.util import with_high_concurrency
import threading
import time
import zope.interface
import zope.interface.verify
from random import randint
from .. import utils
class BasicStorage(object):
def checkBasics(self):
self.assertEqual(self._storage.lastTransaction(), ZERO)
t = TransactionMetaData()
self._storage.tpc_begin(t)
self.assertRaises(POSException.StorageTransactionError,
self._storage.tpc_begin, t)
# Aborting is easy
self._storage.tpc_abort(t)
# Test a few expected exceptions when we're doing operations giving a
# different Transaction object than the one we've begun on.
self._storage.tpc_begin(t)
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
ZERO, ZERO, b'', '', TransactionMetaData())
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
ZERO, 1, b'2', '', TransactionMetaData())
self.assertRaises(
POSException.StorageTransactionError,
self._storage.tpc_vote, TransactionMetaData())
self._storage.tpc_abort(t)
def checkSerialIsNoneForInitialRevision(self):
eq = self.assertEqual
oid = self._storage.new_oid()
txn = TransactionMetaData()
self._storage.tpc_begin(txn)
# Use None for serial. Don't use _dostore() here because that coerces
# serial=None to serial=ZERO.
self._storage.store(oid, None, zodb_pickle(MinPO(11)),
'', txn)
self._storage.tpc_vote(txn)
newrevid = self._storage.tpc_finish(txn)
data, revid = utils.load_current(self._storage, oid)
value = zodb_unpickle(data)
eq(value, MinPO(11))
eq(revid, newrevid)
def checkStore(self):
revid = ZERO
newrevid = self._dostore(revid=None)
# Finish the transaction.
self.assertNotEqual(newrevid, revid)
def checkStoreAndLoad(self):
eq = self.assertEqual
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(7))
data, revid = utils.load_current(self._storage, oid)
value = zodb_unpickle(data)
eq(value, MinPO(7))
# Now do a bunch of updates to an object
for i in range(13, 22):
revid = self._dostore(oid, revid=revid, data=MinPO(i))
# Now get the latest revision of the object
data, revid = utils.load_current(self._storage, oid)
eq(zodb_unpickle(data), MinPO(21))
def checkConflicts(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
self._dostore(oid, revid=revid1, data=MinPO(12))
self.assertRaises(POSException.ConflictError,
self._dostore,
oid, revid=revid1, data=MinPO(13))
def checkWriteAfterAbort(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(6))
def checkAbortAfterVote(self):
oid1 = self._storage.new_oid()
revid1 = self._dostore(oid=oid1, data=MinPO(-2))
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_vote(t)
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
revid = self._dostore(oid=oid, data=MinPO(6))
for oid, revid in [(oid1, revid1), (oid, revid)]:
data, _revid = utils.load_current(self._storage, oid)
self.assertEqual(revid, _revid)
def checkStoreTwoObjects(self):
noteq = self.assertNotEqual
p31, p32, p51, p52 = map(MinPO, (31, 32, 51, 52))
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
noteq(oid1, oid2)
revid1 = self._dostore(oid1, data=p31)
revid2 = self._dostore(oid2, data=p51)
noteq(revid1, revid2)
revid3 = self._dostore(oid1, revid=revid1, data=p32)
revid4 = self._dostore(oid2, revid=revid2, data=p52)
noteq(revid3, revid4)
def checkGetTid(self):
if not hasattr(self._storage, 'getTid'):
return
eq = self.assertEqual
p41, p42 = map(MinPO, (41, 42))
oid = self._storage.new_oid()
self.assertRaises(KeyError, self._storage.getTid, oid)
# Now store a revision
revid1 = self._dostore(oid, data=p41)
eq(revid1, self._storage.getTid(oid))
# And another one
revid2 = self._dostore(oid, revid=revid1, data=p42)
eq(revid2, self._storage.getTid(oid))
def checkLen(self):
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage), 0)
        # check it is correct when the storage contains two objects.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=MinPO(22))
self._dostore(data=MinPO(23))
self.assertTrue(len(self._storage) in [0, 2])
def checkGetSize(self):
self._dostore(data=MinPO(25))
size = self._storage.getSize()
# The storage API doesn't make any claims about what size
# means except that it ought to be printable.
str(size)
def checkNote(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
t.note(u'this is a test')
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkInterfaces(self):
for iface in zope.interface.providedBy(self._storage):
zope.interface.verify.verifyObject(iface, self._storage)
def checkMultipleEmptyTransactions(self):
# There was a bug in handling empty transactions in mapping
# storage that caused the commit lock not to be released. :(
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
t = TransactionMetaData()
self._storage.tpc_begin(t) # Hung here before
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def _do_store_in_separate_thread(self, oid, revid, voted):
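        """Start a competing store of `oid` in a daemon thread, give it a
        brief head start, and return the thread for the caller to join.
        (`voted` is accepted but unused in this base implementation.)"""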
# We'll run the competing trans in a separate thread:
thread = threading.Thread(name='T2',
target=self._dostore, args=(oid,),
kwargs=dict(revid=revid))
thread.daemon = True
thread.start()
thread.join(.1)
return thread
def check_checkCurrentSerialInTransaction(self):
oid = b'\0\0\0\0\0\0\0\xf0'
tid = self._dostore(oid)
tid2 = self._dostore(oid, revid=tid)
data = b'cpersistent\nPersistent\nq\x01.N.' # a simple persistent obj
# ---------------------------------------------------------------------
# stale read
t = TransactionMetaData()
self._storage.tpc_begin(t)
try:
self._storage.store(b'\0\0\0\0\0\0\0\xf1',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid, t)
self._storage.tpc_vote(t)
except POSException.ReadConflictError as v:
self.assertEqual(v.oid, oid)
self.assertEqual(v.serials, (tid2, tid))
else:
if 0:
self.assertTrue(False, "No conflict error")
self._storage.tpc_abort(t)
# ---------------------------------------------------------------------
# non-stale read, no stress. :)
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf2',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid2, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# ---------------------------------------------------------------------
# non-stale read, competition after vote. The competing
# transaction must produce a tid > this transaction's tid
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf3',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid2, t)
self._storage.tpc_vote(t)
# We'll run the competing trans in a separate thread:
thread = self._do_store_in_separate_thread(oid, tid2, True)
self._storage.tpc_finish(t)
thread.join(33)
tid3 = utils.load_current(self._storage, oid)[1]
self.assertTrue(tid3 >
utils.load_current(
self._storage, b'\0\0\0\0\0\0\0\xf3')[1])
# ---------------------------------------------------------------------
# non-stale competing trans after checkCurrentSerialInTransaction
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf4',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid3, t)
thread = self._do_store_in_separate_thread(oid, tid3, False)
# There are 2 possibilities:
# 1. The store happens before this transaction completes,
# in which case, the vote below fails.
# 2. The store happens after this trans, in which case, the
# tid of the object is greater than this transaction's tid.
try:
self._storage.tpc_vote(t)
except POSException.ReadConflictError:
thread.join() # OK :)
else:
self._storage.tpc_finish(t)
thread.join()
tid4 = utils.load_current(self._storage, oid)[1]
self.assertTrue(
tid4 >
utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf4')[1])
def check_tid_ordering_w_commit(self):
# It's important that storages always give a consistent
# ordering for revisions, tids. This is most likely to fail
# around commit. Here we'll do some basic tests to check this.
# We'll use threads to arrange for ordering to go wrong and
# verify that a storage gets it right.
# First, some initial data.
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(ZERO, ZERO, b'x', '', t)
self._storage.tpc_vote(t)
tids = []
self._storage.tpc_finish(t, lambda tid: tids.append(tid))
# OK, now we'll start a new transaction, take it to finish,
# and then block finish while we do some other operations.
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(ZERO, tids[0], b'y', '', t)
self._storage.tpc_vote(t)
to_join = []
def run_in_thread(func):
t = threading.Thread(target=func)
t.daemon = True
t.start()
to_join.append(t)
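        # run_in_thread is applied below as a decorator purely for its side
        # effect: each decorated function starts running in a daemon thread
        # immediately, at definition time.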
started = threading.Event()
finish = threading.Event()
@run_in_thread
def commit():
def callback(tid):
started.set()
tids.append(tid)
finish.wait()
self._storage.tpc_finish(t, callback)
results = {}
started.wait()
attempts = []
attempts_cond = utils.Condition()
def update_attempts():
with attempts_cond:
attempts.append(1)
attempts_cond.notify_all()
@run_in_thread
def load():
update_attempts()
results['load'] = utils.load_current(self._storage, ZERO)[1]
results['lastTransaction'] = self._storage.lastTransaction()
expected_attempts = 1
if hasattr(self._storage, 'getTid'):
expected_attempts += 1
@run_in_thread
def getTid():
update_attempts()
results['getTid'] = self._storage.getTid(ZERO)
if hasattr(self._storage, 'lastInvalidations'):
expected_attempts += 1
@run_in_thread
def lastInvalidations():
update_attempts()
invals = self._storage.lastInvalidations(1)
if invals:
results['lastInvalidations'] = invals[0][0]
with attempts_cond:
while len(attempts) < expected_attempts:
attempts_cond.wait()
time.sleep(.01) # for good measure :)
finish.set()
for t in to_join:
t.join(1)
self.assertEqual(results.pop('load'), tids[1])
self.assertEqual(results.pop('lastTransaction'), tids[1])
for m, tid in results.items():
self.assertEqual(tid, tids[1])
# verify storage/Connection for race in between load/open and local
# invalidations.
# https://github.com/zopefoundation/ZEO/issues/166
# https://github.com/zopefoundation/ZODB/issues/290
@with_high_concurrency
def check_race_loadopen_vs_local_invalidate(self):
db = DB(self._storage)
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
transaction.begin()
zconn = db.open()
root = zconn.root()
root['obj1'] = MinPO(0)
root['obj2'] = MinPO(0)
transaction.commit()
zconn.close()
# verify accesses obj1/obj2 and verifies that obj1.value == obj2.value
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the
# cache is not stale.
failed = threading.Event()
failure = [None]
def verify():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
# obj1 - reload it from zstor
# obj2 - get it from zconn cache
obj1._p_invalidate()
# both objects must have the same values
v1 = obj1.value
v2 = obj2.value
if v1 != v2:
failure[0] = "verify: obj1.value (%d) != obj2.value (%d)" % (
v1, v2)
failed.set()
            # we did not change anything; the test also fails with commit:
transaction.abort()
zconn.close()
# modify changes obj1/obj2 by doing `objX.value += 1`.
#
# Since both objects start from 0, the invariant that
# `obj1.value == obj2.value` is always preserved.
def modify():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
obj1.value += 1
obj2.value += 1
assert obj1.value == obj2.value
transaction.commit()
zconn.close()
# xrun runs f in a loop until either N iterations, or until failed is
# set.
def xrun(f, N):
try:
for i in range(N):
# print('%s.%d' % (f.__name__, i))
f()
if failed.is_set():
break
except: # noqa: E722 do not use bare 'except'
failed.set()
raise
# loop verify and modify concurrently.
init()
N = 500
tverify = threading.Thread(
name='Tverify', target=xrun, args=(verify, N))
tmodify = threading.Thread(
name='Tmodify', target=xrun, args=(modify, N))
tverify.start()
tmodify.start()
tverify.join(60)
tmodify.join(60)
if failed.is_set():
self.fail(failure[0])
# client-server storages like ZEO, NEO and RelStorage allow several storage
# clients to be connected to single storage server.
#
# For client-server storages test subclasses should implement
# _new_storage_client to return new storage client that is connected to the
# same storage server self._storage is connected to.
def _new_storage_client(self):
raise NotImplementedError
# verify storage for race in between load and external invalidations.
# https://github.com/zopefoundation/ZEO/issues/155
#
# This test is similar to check_race_loadopen_vs_local_invalidate but does
# not reuse its code because the probability to reproduce external
# invalidation bug with only 1 mutator + 1 verifier is low.
@with_high_concurrency
def check_race_load_vs_external_invalidate(self):
# dbopen creates new client storage connection and wraps it with DB.
def dbopen():
try:
zstor = self._new_storage_client()
except NotImplementedError:
# the test will be skipped from main thread because dbopen is
# first used in init on the main thread before any other thread
# is spawned.
self.skipTest(
"%s does not implement _new_storage_client" % type(self))
return DB(zstor)
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
db = dbopen()
transaction.begin()
zconn = db.open()
root = zconn.root()
root['obj1'] = MinPO(0)
root['obj2'] = MinPO(0)
transaction.commit()
zconn.close()
db.close()
# we'll run 8 T workers concurrently. As of 20210416, due to race
# conditions in ZEO, it triggers the bug where T sees stale obj2 with
# obj1.value != obj2.value
#
# The probability to reproduce the bug is significantly reduced with
# decreasing n(workers): almost never with nwork=2 and sometimes with
# nwork=4.
nwork = 8
# T is a worker that accesses obj1/obj2 in a loop and verifies
# `obj1.value == obj2.value` invariant.
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the
# cache is not stale.
#
# Once in a while T tries to modify obj{1,2}.value maintaining the
# invariant as test source of changes for other workers.
failed = threading.Event()
failure = [None] * nwork # [tx] is failure from T(tx)
def T(tx, N):
db = dbopen()
def t_():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
# obj1 - reload it from zstor
# obj2 - get it from zconn cache
obj1._p_invalidate()
# both objects must have the same values
i1 = obj1.value
i2 = obj2.value
if i1 != i2:
# print('FAIL')
failure[tx] = (
"T%s: obj1.value (%d) != obj2.value (%d)" % (
tx, i1, i2))
failed.set()
# change objects once in a while
if randint(0, 4) == 0:
# print("T%s: modify" % tx)
obj1.value += 1
obj2.value += 1
try:
transaction.commit()
except POSException.ConflictError:
# print('conflict -> ignore')
transaction.abort()
zconn.close()
try:
for i in range(N):
# print('T%s.%d' % (tx, i))
t_()
if failed.is_set():
break
except: # noqa: E722 do not use bare 'except'
failed.set()
raise
finally:
db.close()
# run the workers concurrently.
init()
N = 100
tg = []
for x in range(nwork):
t = threading.Thread(name='T%d' % x, target=T, args=(x, N))
t.start()
tg.append(t)
for t in tg:
t.join(60)
if failed.is_set():
self.fail([_ for _ in failure if _])
|
synchronous_functions.py
|
import pandas as pd
from ibapi.client import EClient
from ibapi.common import OrderId
from ibapi.wrapper import EWrapper
import threading
import time
from datetime import datetime
import os
APP_DATA_PATH = os.path.join(os.getenv('TESTAPP_DATA_PATH'), 'submitted_orders.csv')
default_hostname = '127.0.0.1'
default_port = 7497
default_client_id = 10645
DEFAULT_ERROR_CODE = -999
SHORT_SLEEP_SECONDS = 0.1
MEDIUM_SLEEP_SECONDS = 0.5
LONG_SLEEP_SECONDS = 5
WARNING_ERROR_CODES = [399, 504, 2104, 2168, 2169]
class ibkr_app(EWrapper, EClient):
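    """EWrapper/EClient hybrid that records API callbacks (errors, order
    status, historical bars, contract details, ...) on the instance so the
    module-level helpers below can poll for results synchronously."""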
def __init__(self):
EClient.__init__(self, self)
self.error_messages = pd.DataFrame(columns=[
'reqId', 'errorCode', 'errorString'
])
self.next_valid_id = None
self.current_time = None
self.historical_data_end = ''
self.contract_details = None
self.contract_details_end = None
self.contract_details_reqId = None
self.order_reqId_end = None
self.order_reqId = None
        # column names match the records appended in orderStatus() below
        self.order_status = pd.DataFrame(
            columns=['order_id', 'status', 'filled', 'remaining',
                     'avg_fill_price', 'perm_id', 'parent_id',
                     'last_fill_price', 'client_id', 'why_held',
                     'mkt_cap_price'])
self.historical_data = pd.DataFrame(columns=['date', 'open', 'close', 'high', 'low'])
self.managed_accounts = ''
def error(self, reqId, errorCode, errorString):
if reqId == self.contract_details_reqId:
print(f'Error from contract details api: errorCode={errorCode} errorMessage={errorString}')
self.contract_details_end = DEFAULT_ERROR_CODE
elif reqId == self.order_reqId_end:
print(f'Error from place order api: errorCode={errorCode} errorMessage={errorString}')
self.order_reqId_end = DEFAULT_ERROR_CODE
if (reqId != -1) and errorCode not in WARNING_ERROR_CODES:
print("Error: ", reqId, " ", errorCode, " ", errorString)
print("Closing connection!")
self.disconnect()
self.error_messages = pd.concat(
[self.error_messages, pd.DataFrame({
"reqId": [reqId],
"errorCode": [errorCode],
"errorString": [errorString]
})])
def managedAccounts(self, accountsList):
self.managed_accounts = [i for i in accountsList.split(",") if i]
def nextValidId(self, orderId: int):
self.next_valid_id = orderId
def currentTime(self, time: int):
self.current_time = datetime.fromtimestamp(time).astimezone().isoformat()
def historicalData(self, reqId, bar):
record = pd.DataFrame(
{'date': [bar.date],
'open': [bar.open],
'high': [bar.high],
'low': [bar.low],
'close': [bar.close]
})
record['date'] = pd.to_datetime(record['date'])
self.historical_data = pd.concat([self.historical_data, record], ignore_index=True)
def historicalDataEnd(self, reqId: int, start: str, end: str):
print("HistoricalDataEnd. ReqId:", reqId, "from", start, "to", end)
self.historical_data_end = reqId
def contractDetails(self, reqId: int, contractDetails):
self.contract_details = contractDetails
def contractDetailsEnd(self, reqId: int):
self.contract_details_end = reqId
def orderStatus(self, orderId: OrderId, status: str, filled: float,
remaining: float, avgFillPrice: float, permId: int,
parentId: int, lastFillPrice: float, clientId: int,
whyHeld: str, mktCapPrice: float):
print(f'Order {orderId} status is {status}')
self.order_reqId_end = orderId
self.order_status = pd.concat(
[
self.order_status,
pd.DataFrame({
'order_id': [orderId],
'status': [status],
'filled': [filled],
'remaining': [remaining],
'avg_fill_price': [avgFillPrice],
'perm_id': [permId],
'parent_id': [parentId],
'last_fill_price': [lastFillPrice],
'client_id': [clientId],
'why_held': [whyHeld],
'mkt_cap_price': [mktCapPrice]
})
],
ignore_index=True
)
self.order_status.drop_duplicates(inplace=True)
def fetch_managed_accounts(hostname=default_hostname, port=default_port,
client_id=default_client_id):
app = ibkr_app()
app.connect(hostname, port, client_id)
while not app.isConnected():
time.sleep(SHORT_SLEEP_SECONDS)
def run_loop():
app.run()
api_thread = threading.Thread(target=run_loop, daemon=True)
api_thread.start()
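    # Block until the API handshake delivers nextValidId, which signals that
    # the connection is ready to serve requests.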
while isinstance(app.next_valid_id, type(None)):
time.sleep(SHORT_SLEEP_SECONDS)
app.disconnect()
return app.managed_accounts
def fetch_historical_data(contract, endDateTime='', durationStr='30 D',
barSizeSetting='1 hour', whatToShow='MIDPOINT',
useRTH=True, hostname=default_hostname,
port=default_port, client_id=default_client_id):
app = ibkr_app()
app.connect(hostname, port, client_id)
while not app.isConnected():
time.sleep(SHORT_SLEEP_SECONDS)
def run_loop():
app.run()
api_thread = threading.Thread(target=run_loop, daemon=True)
api_thread.start()
while isinstance(app.next_valid_id, type(None)):
time.sleep(SHORT_SLEEP_SECONDS)
tickerId = app.next_valid_id
app.reqHistoricalData(
tickerId, contract, endDateTime, durationStr, barSizeSetting,
whatToShow, useRTH, formatDate=1, keepUpToDate=False, chartOptions=[])
while app.historical_data_end != tickerId:
time.sleep(MEDIUM_SLEEP_SECONDS)
if not app.isConnected():
break
app.disconnect()
return app.historical_data
def fetch_contract_details(contract, hostname=default_hostname,
port=default_port, client_id=default_client_id):
app = ibkr_app()
app.connect(hostname, port, client_id)
while not app.isConnected():
time.sleep(SHORT_SLEEP_SECONDS)
def run_loop():
app.run()
api_thread = threading.Thread(target=run_loop, daemon=True)
api_thread.start()
while isinstance(app.next_valid_id, type(None)):
time.sleep(SHORT_SLEEP_SECONDS)
app.contract_details_reqId = app.next_valid_id
app.reqContractDetails(app.contract_details_reqId, contract)
while (app.contract_details_end != app.contract_details_reqId) and (app.contract_details_end != DEFAULT_ERROR_CODE):
time.sleep(MEDIUM_SLEEP_SECONDS)
if not app.isConnected():
break
app.disconnect()
return app.contract_details
def save_order(contract, order, app):
    print('Saving order...')
status = app.order_status[app.order_status['order_id'] == app.order_reqId]
client_id = int(status['client_id'].values[0])
perm_id = int(status['perm_id'].values[0])
lmt_price = f'{order.lmtPrice:.2f}' if order.orderType == 'LMT' else 'N/A'
data = pd.read_csv(APP_DATA_PATH)
data = pd.concat(
[data,
pd.DataFrame({
'timestamp': [app.current_time],
'order_id': [app.order_reqId],
'client_id': [client_id],
'perm_id': [perm_id],
'con_id': [0],
'symbol': [contract.symbol],
'action': [order.action],
'size': [order.totalQuantity],
'order_type': [order.orderType],
'lmt_price': [lmt_price]
})
],
ignore_index=True
)
data.to_csv(APP_DATA_PATH, index=False)
    print('Order saved!')
def submit_order(contract, order, hostname=default_hostname,
port=default_port, client_id=default_client_id):
app = ibkr_app()
app.connect(hostname, port, client_id)
while not app.isConnected():
time.sleep(SHORT_SLEEP_SECONDS)
def run_loop():
app.run()
api_thread = threading.Thread(target=run_loop, daemon=True)
api_thread.start()
while isinstance(app.next_valid_id, type(None)):
time.sleep(SHORT_SLEEP_SECONDS)
app.order_reqId = app.next_valid_id
app.reqCurrentTime()
app.placeOrder(app.order_reqId, contract, order)
while (app.order_reqId_end != app.order_reqId) and (app.order_reqId_end != DEFAULT_ERROR_CODE):
# while not ('Submitted' in set(app.order_status['status'])):
time.sleep(MEDIUM_SLEEP_SECONDS)
if not app.isConnected():
break
app.disconnect()
if app.order_reqId_end == DEFAULT_ERROR_CODE:
msg = f'Order {app.order_reqId} did not succeed'
else:
msg = f'Order {app.order_reqId} successfully submitted'
save_order(contract, order, app)
print(msg)
return msg
|
isp_renode.py
|
import socket
import select
import threading
import time
import sys
import subprocess
import os
import logging
import isp_utils
logger = logging.getLogger()
# set timeout seconds
timeout_seconds = 60
# configure the test log files
uart_port = 4444
uart_log_file = "uart.log"
status_port = 3344
status_log_file = "pex.log"
renode_port = 3320
renode_log_file = "sim.log"
process_exit = False
def watchdog():
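    """Set process_exit after timeout_seconds unless another thread has
    already set it, so the logging loops and simulator shut down."""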
global process_exit
for i in range(timeout_seconds * 10):
if not process_exit:
time.sleep(0.1)
    logger.warning("Watchdog timeout")
process_exit = True
def socketConnect(host, port):
global process_exit
res = None
connecting = True
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while connecting and not process_exit:
try:
s.connect((host, port))
connecting = False
res = s
logging.debug("connected {0}:{1}".format(host, port))
        except OSError:
logging.debug("trying {0}:{1}...".format(host, port))
time.sleep(1)
if connecting:
logger.error("Failed to connect {0}:{1}...".format(host, port))
return res
def logPort(terminate_msg, log_file, port):
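    """Connect to `port` on this host and stream whatever it prints to
    `log_file` until `terminate_msg` appears (or process_exit is set)."""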
global process_exit
data = ""
logger.info("Logging to: {}".format(log_file))
f = open(log_file, "w")
s = socketConnect(socket.gethostname(), port)
while(s and not process_exit):
time.sleep(1)
if terminate_msg in data:
process_exit = True
data = ""
        ready_r, ready_w, err = select.select([s], [], [], 1)
if ready_r:
data = s.recv(1024).decode().replace('\r', '')
f.write(data)
if s:
s.close()
f.close()
def logUart(run_dir, runtime):
uart_log_path = os.path.join(run_dir, uart_log_file)
logPort(isp_utils.terminateMessage(runtime), uart_log_path, uart_port)
def logStatus(run_dir):
status_log_path = os.path.join(run_dir, status_log_file)
logPort("Policy Violation:", status_log_path, status_port)
if "Policy Violation:" in open(status_log_path, 'r').read():
        logger.warning("Process exited due to policy violation")
def launchRenode(run_dir):
global process_exit
try:
cmd = ["renode",
"--plain",
"--disable-xwt",
"--hide-log",
"--port={}".format(renode_port)]
logger.debug("Running command: {}".format(cmd))
renode_log_path = os.path.join(run_dir, renode_log_file)
process = subprocess.Popen(cmd, stdout=open(renode_log_path, 'w'), stderr=subprocess.STDOUT)
while process.poll() is None:
time.sleep(0.01)
finally:
process_exit = True
try:
process.kill()
except OSError:
logger.debug("Renode process no longer exists")
pass
def runSim(exe_path, run_dir, policy_dir, runtime, rule_cache,
gdb_port, extra, soc_cfg, use_validator=True):
    global process_exit
    doRescScript(exe_path, run_dir, policy_dir, gdb_port)
    if not use_validator:
        if isp_utils.generateTagInfo(exe_path, run_dir, policy_dir) is False:
            # assumption: retVals lives in isp_utils (the original referenced
            # retVals without importing it)
            return isp_utils.retVals.TAG_FAIL
try:
logger.debug("Begin Renode test... (timeout: {})".format(timeout_seconds))
wd = threading.Thread(target=watchdog)
wd.start()
logger.debug("Start Renode server...")
renode = threading.Thread(target=launchRenode, args=(run_dir,))
renode.start()
time.sleep(2)
logger.debug("Start Logging...")
uart_logger = threading.Thread(target=logUart, args=(run_dir, runtime))
uart_logger.start()
logging.info(run_dir)
status_logger = threading.Thread(target=logStatus, args=(run_dir,))
status_logger.start()
logger.debug("Connecting to Renode server...")
s = socketConnect(socket.gethostname(), renode_port)
logger.debug("Connected.")
if s:
with open(os.path.join(run_dir, "main.resc"), 'r') as f:
s.send(f.read().replace('\n', '\r\n').encode())
s.send('start\r\n'.encode())
while not process_exit:
time.sleep(0.1)
                ready_r, ready_w, err = select.select([s], [], [], 1)
if ready_r:
print(s.recv(1024).decode().replace('\r', ''))
if s:
try:
s.send('quit\r\n'.encode())
time.sleep(1)
s.close()
except:
pass
wd.join()
uart_logger.join()
status_logger.join()
renode.join()
finally:
try:
if s:
s.send('quit\r\n'.encode())
time.sleep(1)
s.close()
except:
pass
    return isp_utils.retVals.SUCCESS
def doRescScript(exe_path, run_dir, policy_dir, gdb_port):
resc_script = rescScript(exe_path, run_dir, policy_dir, gdb_port)
with open(os.path.join(run_dir, "main.resc"), 'w') as f:
f.write(resc_script)
def rescScript(exe_path, run_dir, policy_dir, gdb_port):
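    """Render the Renode .resc script: create the machine, wire the UART and
    status-server sockets, load the ELF and external validator, and
    optionally start a GDB server."""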
gdb_command = ""
if gdb_port != 0:
gdb_command = "sysbus.ap_core StartGdbServer {}".format(gdb_port)
return """
mach create
machine LoadPlatformDescription @platforms/boards/dover-riscv-board.repl
sysbus.ap_core MaximumBlockSize 1
emulation CreateServerSocketTerminal {uart_port} "uart-socket"
connector Connect sysbus.uart1 uart-socket
#showAnalyzer sysbus.uart Antmicro.Renode.UI.ConsoleWindowBackendAnalyzer
#emulation CreateUartPtyTerminal "uart-pty" "/tmp/uart-pty"
#connector Connect sysbus.uart uart-pty
sysbus LoadELF @{exe_path}
sysbus.ap_core SetExternalValidator @{policy_dir}/librv32-renode-validator.so @{run_dir}/validator_cfg.yml
{gdb_command}
logLevel 1 sysbus.ap_core
sysbus.ap_core StartStatusServer {status_port}
""".format(exe_path=exe_path, run_dir=run_dir,
policy_dir=policy_dir, gdb_command=gdb_command,
uart_port=uart_port, status_port=status_port)
|